code stringlengths 17 6.64M |
|---|
def rf_module_to_pt_module(rf_module: rf.Module, *, aux_params_as_buffers: bool=True) -> torch.nn.Module:
    """
    Wrap an RF module as a PyTorch module.

    :param rf_module: RF module
    :param aux_params_as_buffers: whether to map RF auxiliary parameters to PyTorch buffers,
        otherwise to normal parameters, i.e. they occur in model.named_parameters().
        Note that even when they are part of model.named_parameters(),
        aux params usually don't have a gradient, and then they are not updated by the optimizer.
        Historically, this was False.
        Now, this is True by default, as this is more reasonable.
        Note that the optimizer state dict will change if you change this,
        however, we will automatically convert such optimizer state dict.
    :return: torch module
    """
    assert isinstance(rf_module, rf.Module)
    if isinstance(rf_module, _PTModuleAsRFModule):
        # Already a wrapped PT module: unwrap instead of wrapping twice.
        return rf_module.pt_module
    return _RFModuleAsPTModule(rf_module=rf_module, aux_params_as_buffers=aux_params_as_buffers)
|
class _PTModuleAsRFModule(rf.Module):
    """
    Wraps a pure PyTorch module as an RF module:
    PT parameters become RF parameters, PT buffers become auxiliary RF parameters,
    and PT children are recursively wrapped as RF children.
    Both sides share the same underlying torch tensors.
    """

    def __init__(self, pt_module: torch.nn.Module):
        """
        :param pt_module: PyTorch module to wrap
        """
        super().__init__()
        self._pt_module = pt_module
        for name, pt_param in pt_module.named_parameters(recurse=False):
            rf_param = rf.Parameter(
                raw_tensor=pt_param,
                dims=[Dim(d) for d in pt_param.shape],
                # e.g. "torch.float32" -> "float32"
                dtype=str(pt_param.dtype).split('.')[-1],
            )
            setattr(self, name, rf_param)
        for name, pt_buf in pt_module.named_buffers(recurse=False):
            # Buffers become auxiliary RF params (not trained).
            rf_param = rf.Parameter(
                raw_tensor=pt_buf,
                dims=[Dim(d) for d in pt_buf.shape],
                dtype=str(pt_buf.dtype).split('.')[-1],
                auxiliary=True,
            )
            setattr(self, name, rf_param)
        for name, pt_child in pt_module.named_children():
            # Bugfix: children of a PT module are PT modules, so wrap them PT->RF here.
            # The original code called rf_module_to_pt_module(pt_child), which asserts
            # isinstance(..., rf.Module) (always fails here) and wraps in the wrong direction.
            if isinstance(pt_child, _RFModuleAsPTModule):
                # Unwrap instead of double-wrapping.
                rf_child = pt_child.rf_module
            else:
                rf_child = _PTModuleAsRFModule(pt_child)
            setattr(self, name, rf_child)

    @property
    def pt_module(self) -> torch.nn.Module:
        """wrapped PyTorch module"""
        return self._pt_module

    def __call__(self, *args, **kwargs):
        """forward, delegates to the wrapped PT module"""
        return self._pt_module(*args, **kwargs)
|
class _RFModuleAsPTModule(torch.nn.Module):
    """
    Wraps an RF module as a PyTorch module:
    RF parameters become PT parameters (or PT buffers, for auxiliary RF params,
    if ``aux_params_as_buffers`` is enabled), RF children become wrapped PT children.
    Both sides share the same underlying torch tensors, and re-registrations
    (e.g. via ``.to(device)``) are synced back into the RF module.
    """

    def __init__(self, rf_module: rf.Module, *, aux_params_as_buffers: bool=True):
        super().__init__()
        self._rf_module = rf_module
        self._aux_params_as_buffers = aux_params_as_buffers
        # While True, register_parameter/register_buffer skip syncing back into the RF module.
        self._is_initializing = True
        for (name, rf_param) in rf_module.named_parameters(recurse=False):
            pt_param = rf_param.raw_tensor
            assert isinstance(pt_param, torch.nn.Parameter)
            if (rf_param.auxiliary and aux_params_as_buffers):
                # Aux params become buffers, so they do not show up in
                # model.named_parameters() / the optimizer.
                self.register_buffer(name, pt_param)
            else:
                self.register_parameter(name, pt_param)
        for (name, rf_mod) in rf_module.named_children():
            pt_mod = rf_module_to_pt_module(rf_mod, aux_params_as_buffers=aux_params_as_buffers)
            self.add_module(name, pt_mod)
        self._is_initializing = False

    def _get_name(self):
        # Used by torch for repr(); mark this as a wrapped RF module.
        return (self._rf_module.__class__.__name__ + '[RF→PT]')

    @property
    def rf_module(self) -> rf.Module:
        """wrapped RF module"""
        return self._rf_module

    def forward(self, *args, **kwargs):
        """forward, delegates to the wrapped RF module"""
        return self._rf_module(*args, **kwargs)

    def _apply(self, *args, **kwargs):
        # Called by e.g. .to(device) / .cuda() / .float(): torch replaces the raw tensors,
        # so sync the (possibly new) tensors back into the RF parameters.
        super()._apply(*args, **kwargs)
        for (name, rf_param) in self._rf_module.named_parameters(recurse=False):
            pt_param = getattr(self, name)
            if (rf_param.auxiliary and self._aux_params_as_buffers):
                assert isinstance(pt_param, torch.Tensor)
                # Buffers are plain tensors; rewrap as Parameter for the RF side.
                pt_param = torch.nn.Parameter(pt_param, pt_param.requires_grad)
            else:
                assert isinstance(pt_param, torch.nn.Parameter), f'{self}.{name} is not a Parameter but {type(pt_param).__name__}'
            rf_param.raw_tensor = pt_param

    def register_parameter(self, name: str, param: Optional[torch.nn.Parameter]) -> None:
        """(re)register parameter, keeping the RF module in sync after init"""
        super().register_parameter(name, param)
        if ((param is None) or self._is_initializing):
            return
        rf_param = getattr(self._rf_module, name, None)
        if (not isinstance(rf_param, rf.Parameter)):
            # Not mirrored on the RF side; nothing to sync.
            return
        assert isinstance(param, torch.nn.Parameter), f'{self} register_parameter {name}: did not get a Parameter but {type(param).__name__}'
        rf_param.raw_tensor = param

    def register_buffer(self, name: str, tensor: Optional[torch.Tensor], persistent: bool=True) -> None:
        """(re)register buffer, keeping the RF module in sync after init"""
        super().register_buffer(name, tensor, persistent=persistent)
        if ((tensor is not None) and persistent and self._aux_params_as_buffers and (not self._is_initializing)):
            rf_param = getattr(self._rf_module, name, None)
            if (not isinstance(rf_param, rf.Parameter)):
                return
            # RF params always hold a torch.nn.Parameter; wrap the plain buffer tensor.
            pt_param = torch.nn.Parameter(tensor, tensor.requires_grad)
            rf_param.raw_tensor = pt_param
|
def squared_difference(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Elementwise squared difference of two tensors.

    :param a: first operand
    :param b: second operand
    :return: (a - b) ** 2
    """
    diff = torch.sub(a, b)
    return torch.square(diff)
|
def _init_optimizer_classes_dict():
    """
    Initializes a global dictionary with all optimizers available in PyTorch,
    keyed by lowercased class name. Idempotent.
    """
    global _OptimizerClassesDictInitialized
    if _OptimizerClassesDictInitialized:
        return
    _OptimizerClassesDictInitialized = True
    for name, cls in list(vars(torch.optim).items()):
        assert isinstance(name, str)
        if not isinstance(cls, type) or not issubclass(cls, torch.optim.Optimizer):
            continue
        # Bugfix: the dict is keyed by the lowercased name, so the duplicate check
        # must use name.lower() as well (the old check against the mixed-case name
        # could never fire, and case-colliding names would silently overwrite).
        assert name.lower() not in _OptimizerClassesDict
        _OptimizerClassesDict[name.lower()] = cls
|
def get_optimizer_class(class_name) -> Type[torch.optim.Optimizer]:
    """
    Resolve the user-provided optimizer spec to an optimizer class.

    :param str|()->torch.optim.Optimizer|type[torch.optim.Optimizer] class_name:
        Optimizer data, e.g. "adam", torch.optim.Adam...
    :return: Optimizer class
    """
    _init_optimizer_classes_dict()
    if isinstance(class_name, type):
        # Already a class; just validate it.
        assert issubclass(class_name, torch.optim.Optimizer)
        return class_name
    if callable(class_name):
        # A factory callable; resolve it by calling it.
        return class_name()
    assert isinstance(class_name, str)
    key = class_name.lower()
    assert key in _OptimizerClassesDict, ('%s not found in the available torch optimizers list: %s.' % (key, ', '.join(("'%s'" % k) for k in _OptimizerClassesDict)))
    return _OptimizerClassesDict[key]
|
def _get_class_init_kwargs(optim_class):
    """
    Obtains the keyword arguments of the class provided as parameter that the user can add to their optimizer.

    :param type[torch.optim.Optimizer] optim_class: Optimizer class.
    :return: Keyword arguments of the provided class.
    :rtype: List[str]
    """
    from returnn.util.basic import collect_class_init_kwargs

    init_kwargs = collect_class_init_kwargs(optim_class)
    # "params" is supplied by us (the param groups), never by the user.
    init_kwargs.remove('params')
    return init_kwargs
|
class Updater(object):
    """
    Wraps a torch.optim.Optimizer, and extends it by some further functionality:
    dynamic learning rate scheduling, gradient noise/clipping,
    optimizer checkpoint save/load (incl. remapping by param names),
    and weight-decay-aware param grouping.
    """

    def __init__(self, *, config, network, device, initial_learning_rate=1.0):
        """
        :param returnn.config.Config config: config defining the training conditions.
        :param torch.nn.Module network: PyTorch Module defining the network.
        :param torch.device|str device:
        :param float initial_learning_rate:
        """
        self.config = config
        self.learning_rate = initial_learning_rate
        # Effective LR = base LR, possibly transformed by the dynamic_learning_rate hook.
        self._effective_learning_rate = self.learning_rate
        self.network = network
        self._device = device
        self._current_train_step = 0
        self._current_epoch = 0
        self.learning_rate_function = self.config.typed_value('dynamic_learning_rate', None)
        if (self.learning_rate_function is not None):
            print('Using dynamic learning rate scheduler that updates based on global train steps', file=log.v2)
            if callable(self.learning_rate_function):
                import inspect

                signature = inspect.signature(self.learning_rate_function)
                # Require **kwargs so we can pass new arguments later without breaking user functions.
                assert any([(arg.kind == inspect.Parameter.VAR_KEYWORD) for arg in signature.parameters.values()]), 'please specify **kwargs in dynamic_learning_rate for future compatibility'
                if ('network' in signature.parameters):
                    raise ValueError('Torch updater: dynamic_learning_rate network is TF specific')
            else:
                raise NotImplementedError('not implemented for not callable dynamic_learning_rate')
        self._optimizer_opts = None
        self.optimizer = None  # created lazily via create_optimizer()
        self._grad_clip = self.config.float('gradient_clip', 0.0)
        self._grad_clip_global_norm = self.config.float('gradient_clip_global_norm', 0.0)
        self._grad_noise = self.config.float('gradient_noise', 0.0)
        # Reject TF-backend-only gradient options explicitly instead of silently ignoring them.
        for opt_name in ['gradient_clip_norm', 'gradient_clip_avg_norm', 'global_norm_tag', 'gradient_clip_global_norm_tag', 'grad_norm_to_clip_to_zero', 'maximize_grad_norm', 'debug_grad_summaries', 'gradient_nan_inf_filter']:
            if self.config.float(opt_name, 0.0):
                raise NotImplementedError(f'PyTorch updater: option {opt_name} not supported currently')
        if self.config.float('grad_clip', 0.0):
            raise ValueError('You set grad_clip in the config, but the option is called gradient_clip_global_norm (or other options).')
        self._update_effective_learning_rate()

    def set_learning_rate(self, value):
        """
        Updates the learning rate of the optimizer at each (sub)epoch.

        :param float value: New learning rate.
        """
        self.learning_rate = value
        self._update_effective_learning_rate()

    def get_effective_learning_rate(self) -> float:
        """
        :return: the actual learning rate (after the dynamic_learning_rate hook, if any)
        """
        return self._effective_learning_rate

    def _update_effective_learning_rate(self):
        # Recompute the effective LR and push it into all optimizer param groups.
        self._effective_learning_rate = self.learning_rate
        if (self.learning_rate_function is not None):
            self._effective_learning_rate = self.learning_rate_function(global_train_step=self._current_train_step, epoch=self._current_epoch, learning_rate=self.learning_rate)
        if self.optimizer:
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = self._effective_learning_rate

    def set_current_train_step(self, *, global_train_step: int, epoch: int):
        """
        Obtains an updated learning rate for the current training step inside a (sub)epoch.
        """
        self._current_train_step = global_train_step
        self._current_epoch = epoch
        self._update_effective_learning_rate()

    def step(self, *, grad_scaler: Optional[torch.cuda.amp.GradScaler]=None):
        """
        Perform one step, i.e. update the parameters using the optimizer given the current calculated gradients.

        :param grad_scaler: AMP gradient scaler, if automatic mixed precision is used.
        """
        # Gradient preprocessing order: noise -> per-value clipping -> global-norm clipping.
        if self._grad_noise:
            gradient_noise_(self.network.parameters(), self._grad_noise)
        if self._grad_clip:
            torch.nn.utils.clip_grad_value_(self.network.parameters(), self._grad_clip)
        if self._grad_clip_global_norm:
            torch.nn.utils.clip_grad_norm_(self.network.parameters(), self._grad_clip_global_norm)
        if (grad_scaler is not None):
            # AMP: the scaler unscales, steps, and then updates its scale factor.
            grad_scaler.step(self.optimizer)
            grad_scaler.update()
        else:
            self.optimizer.step()

    def create_optimizer(self):
        """
        Creates an optimizer and stores it in self.optimizer.
        """
        optimizer_opts = self.config.typed_value('optimizer', None)
        if (optimizer_opts is None):
            raise ValueError("config field 'optimizer' needs to be set explicitely for the Torch backend")
        self._optimizer_opts = optimizer_opts
        self.optimizer = self._create_optimizer(optimizer_opts)

    def load_optimizer(self, filename):
        """
        Loads a torch.optim.Optimizer from disk and stores it in self.optimizer.

        :param str filename: File from which to load the optimizer state.
        """
        print(('Load optimizer %s' % filename), file=log.v4)
        optimizer_state = torch.load(filename, map_location=self._device)
        assert isinstance(optimizer_state, dict), f'optimizer_state is not a dict but {type(optimizer_state)}'
        if (('optimizer' not in optimizer_state) and ('param_groups' in optimizer_state) and ('state' in optimizer_state)):
            # Old checkpoint format: the file is directly the optimizer state dict. Wrap it.
            optimizer_state = {'optimizer': optimizer_state}
        if (optimizer_state.get('param_names') is not None):
            # We saved the flat list of param names. Use it to remap the state in case the
            # current model has its params in a different order, or has extra/missing params
            # (e.g. after aux_params_as_buffers changed, or after model changes).
            if (len(self.optimizer.param_groups) != len(optimizer_state['optimizer']['param_groups'])):
                raise ValueError(('loaded state dict has a different number of parameter groups: ckpt %i vs. self %i' % (len(optimizer_state['optimizer']['param_groups']), len(self.optimizer.param_groups))))
            (self_param_names, param_id_to_name) = self._get_opt_param_names()
            ckpt_param_names = optimizer_state['param_names']
            if (self_param_names != ckpt_param_names):
                self_param_names_dict = {name: i for (i, name) in enumerate(self_param_names)}
                # "Critical" params are those with requires_grad=True: their optimizer state
                # must exist in the checkpoint; others (frozen/aux) may be missing.
                self_param_names_critical_set = set()
                ckpt_param_names_dict = {name: i for (i, name) in enumerate(ckpt_param_names)}
                map_ckpt_param_idx_to_self_param_idx = {}
                self_params_not_in_ckpt = []
                self_params_not_in_ckpt_critical = []
                for param_name in self_param_names:
                    param = self.network.get_parameter(param_name)
                    if param.requires_grad:
                        self_param_names_critical_set.add(param_name)
                    if (param_name not in ckpt_param_names_dict):
                        self_params_not_in_ckpt.append(param_name)
                        if param.requires_grad:
                            self_params_not_in_ckpt_critical.append(param_name)
                ckpt_params_not_in_self = []
                for (i, param_name) in enumerate(ckpt_param_names):
                    if (param_name not in self_param_names_dict):
                        ckpt_params_not_in_self.append(param_name)
                    else:
                        map_ckpt_param_idx_to_self_param_idx[i] = self_param_names_dict[param_name]
                if self_params_not_in_ckpt_critical:
                    raise ValueError(('load_optimizer: required params not in ckpt: %s' % ', '.join(self_params_not_in_ckpt_critical)))
                if (self_params_not_in_ckpt or ckpt_params_not_in_self):
                    print(('load_optimizer: params not in ckpt: %s\n ckpt params not existing: %s' % ((', '.join(self_params_not_in_ckpt) or '(None)'), (', '.join(ckpt_params_not_in_self) or '(None)'))), file=log.v3)
                    if self_params_not_in_ckpt:
                        print('load_optimizer: All params not in ckpt have required_grad=False, thus not critical.', file=log.v3)
                else:
                    print('load_optimizer: Params in different order.', file=log.v3)
                print('load_optimizer: Will remap the state dict.', file=log.v3)
                for (ckpt_group, self_group) in zip(optimizer_state['optimizer']['param_groups'], self.optimizer.param_groups):
                    self_group_param_names = set((param_id_to_name[id(p)] for p in self_group['params']))
                    ckpt_group_param_names = set((ckpt_param_names[p] for p in ckpt_group['params']))
                    # Only the critical (trainable) params must match per group.
                    self_group_param_names.intersection_update(self_param_names_critical_set)
                    ckpt_group_param_names.intersection_update(self_param_names_critical_set)
                    if (ckpt_group_param_names != self_group_param_names):
                        raise ValueError(('load_optimizer: params in group not in ckpt: %s\n ckpt params not existing: %s' % ((', '.join((ckpt_group_param_names - self_group_param_names)) or '(None)'), (', '.join((self_group_param_names - ckpt_group_param_names)) or '(None)'))))
                    # Rewrite the ckpt group to refer to our own param indices.
                    ckpt_group['params'] = [self_param_names_dict[param_id_to_name[id(p)]] for p in self_group['params']]
                # Remap the per-param state entries from ckpt indices to our indices; drop unknown ones.
                optimizer_state['optimizer']['state'] = {map_ckpt_param_idx_to_self_param_idx[i]: s for (i, s) in optimizer_state['optimizer']['state'].items() if (i in map_ckpt_param_idx_to_self_param_idx)}
        self.optimizer.load_state_dict(optimizer_state['optimizer'])
        del optimizer_state
        gc.collect()

    def _get_opt_param_names(self) -> Tuple[List[str], Dict[int, str]]:
        # Map each optimizer param (by object identity) back to its name in the network,
        # and collect the flat list of names in optimizer param-group order.
        param_id_to_name = {}
        for (name, p) in self.network.named_parameters():
            param_id_to_name[id(p)] = name
        param_names = []
        for group in self.optimizer.param_groups:
            for p in group['params']:
                param_names.append(param_id_to_name[id(p)])
        return (param_names, param_id_to_name)

    def save_optimizer(self, filename):
        """
        Saves the state of self.optimizer to a file.

        :param str filename: File in which to save the optimizer state.
        """
        directory = os.path.dirname(filename)
        if (directory and (not os.path.exists(directory))):
            os.makedirs(directory, exist_ok=True)
        (param_names, _) = self._get_opt_param_names()
        print(('Save optimizer under %s' % filename), file=log.v4)
        # Write to a temp file first and rename, so we never leave a truncated checkpoint behind.
        tmp_filename = (filename + '.tmp_write')
        if os.path.exists(tmp_filename):
            os.unlink(tmp_filename)
        torch.save({'optimizer': self.optimizer.state_dict(), 'optimizer_class_name': self.optimizer.__class__.__name__, 'optimizer_opts': self._optimizer_opts, 'param_names': param_names, 'epoch': self._current_epoch, 'step': self._current_train_step, 'effective_learning_rate': self.get_effective_learning_rate(), 'returnn_version': returnn.__long_version__}, tmp_filename)
        os.rename(tmp_filename, filename)

    def get_optimizer(self):
        """
        :return: Wrapped optimizer object.
        :rtype: torch.optim.Optimizer
        """
        return self.optimizer

    def _create_optimizer(self, optimizer_opts):
        """
        Returns a valid optimizer considering the dictionary given by the user in the config.

        :param dict[str]|str optimizer_opts: Optimizer configuration specified by the user.
            If it's a dict, it must contain "class" with the optimizer name or callable.
            If it's a str, it must be the optimizer name.
        :return: A valid optimizer.
        :rtype: torch.optim.Optimizer
        """
        lr = self.learning_rate
        if isinstance(optimizer_opts, torch.optim.Optimizer):
            # Already a constructed optimizer instance; use as-is.
            return optimizer_opts
        elif callable(optimizer_opts):
            optimizer_opts: Dict[str, Any] = {'class': optimizer_opts}
        else:
            if (not isinstance(optimizer_opts, dict)):
                raise ValueError("'optimizer' must of type dict, callable or torch.optim.Optimizer instance.")
            if ('class' not in optimizer_opts):
                raise ValueError("'class' field of 'optimizer' dict was not set (use e.g. 'SGD', 'Adam', ...)")
            optimizer_opts = optimizer_opts.copy()
        optim_class_name = optimizer_opts.pop('class')
        optim_class = get_optimizer_class(optim_class_name)
        opt_kwargs = optimizer_opts.copy()
        optim_class_init_kwargs = _get_class_init_kwargs(optim_class)
        # TF-style configs use "epsilon"; torch optimizers call it "eps".
        if (('eps' in optim_class_init_kwargs) and ('epsilon' in opt_kwargs)):
            opt_kwargs['eps'] = opt_kwargs.pop('epsilon')
        if ('learning_rate' in opt_kwargs):
            raise ValueError("'learning_rate' should be set outside of the 'optimizer' dict.")
        lr = (lr * opt_kwargs.pop('learning_rate_multiplier', 1.0))
        opt_kwargs['lr'] = lr
        param_groups = self._get_optimizer_param_groups(optim_class, opt_kwargs)
        optimizer = optim_class(param_groups, **opt_kwargs)
        print(('Optimizer: %s' % optimizer), file=log.v1)
        assert isinstance(optimizer, torch.optim.Optimizer)
        return optimizer

    def _create_default_optimizer(self):
        """
        :return: SGD optimizer.
        :rtype: torch.optim.SGD
        """
        print('Create SGD optimizer (default).', file=log.v2)
        optimizer = torch.optim.SGD(self.network.parameters(), lr=self.learning_rate)
        return optimizer

    def _get_optimizer_param_groups(self, optim_class: Type[torch.optim.Optimizer], optimizer_opts: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        The weight_decay parameter from AdamW affects the weights of layers such as LayerNorm and Embedding.
        This function creates a blacklist of network modules and splits the optimizer groups in two:
        those who will receive weight decay, and those who won't receive it.
        The weight_decay parameter of the rest of the optimizers is L2 regularization.

        For further reading, see https://github.com/karpathy/minGPT/pull/24#issuecomment-679316025 and
        https://discuss.pytorch.org/t/weight-decay-in-the-optimizers-is-a-bad-idea-especially-with-batchnorm/16994.

        This code is based on https://github.com/karpathy/minGPT (MIT license):
        https://github.com/karpathy/minGPT/blob/3ed14b2cec0dfdad3f4b2831f2b4a86d11aef150/mingpt/model.py#L136.

        Three variants how this can be configured by the user in the optimizer options dict:

        - ``param_groups_custom``: callable which returns a list of param groups.
          This is the most flexible option, and could also go beyond just weight decay logic,
          or having more than two param groups (weight decay disabled/enabled).
        - ``weight_decay_custom_include_check``: callable which returns True/False for each param,
          to either include it in the weight decay group or not,
          or None to use the default logic.
        - ``weight_decay_modules_blacklist``: list of modules types which should not get weight decay.
          Those can be RF modules or pure PyTorch modules.
          The types can be specified as string (e.g. ``"torch.nn.LayerNorm"``) or as the type itself.

        :param optim_class: Optimizer class.
        :param optimizer_opts: Optimizer configuration specified by the user. Might be modified inplace here.
        :return: List of configurations for the different sets of parameters.
        """
        custom_param_groups = optimizer_opts.pop('param_groups_custom', None)
        if (custom_param_groups is not None):
            # Fully user-defined param groups; bypass all default logic.
            assert callable(custom_param_groups), f'invalid param_groups_custom {custom_param_groups!r}'
            rf_model = pt_module_to_wrapped_rf_module(self.network)
            custom_param_groups = custom_param_groups(model=self.network, rf_model=rf_model, optimizer_class=optim_class, optimizer_opts=optimizer_opts)
            return custom_param_groups
        network_params = self.network.parameters()
        assert self.config.bool('decouple_constraints', True), 'L2/weight_decay constraints are decoupled in PyTorch, but decouple_constraints=False was explicitly specified in the config.'
        cls_init_kwargs = _get_class_init_kwargs(optim_class)
        if ('weight_decay' not in cls_init_kwargs):
            assert ('weight_decay' not in optimizer_opts), ('weight_decay not accepted by the chosen optimizer. Accepted values: %s' % ', '.join((('%s' % optim_name) for optim_name in cls_init_kwargs)))
            return [{'params': network_params}]
        weight_decay = optimizer_opts.get('weight_decay', 0.0)
        if (not weight_decay):
            return [{'params': network_params}]
        # Split params into two groups: with weight decay and without.
        wd_params = set()
        no_wd_params = set()
        blacklist_wd_modules = optimizer_opts.pop('weight_decay_modules_blacklist', None)
        if (blacklist_wd_modules is None):
            blacklist_wd_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
        else:
            blacklist_wd_modules = _wrap_user_blacklist_wd_modules(blacklist_wd_modules)
        custom_include_check = optimizer_opts.pop('weight_decay_custom_include_check', None)
        if custom_include_check:
            assert callable(custom_include_check), f'invalid weight_decay_custom_include_check {custom_include_check!r}'
        # Params can be shared between modules; classify each underlying param only once.
        visited_params: Set[RefIdEq[torch.nn.Parameter]] = set()
        for (module_name, module) in self.network.named_modules():
            module_name: str
            module: torch.nn.Module
            rf_module = pt_module_to_wrapped_rf_module(module)
            for (param_name, param) in module.named_parameters(recurse=False):
                param_name: str
                param: torch.nn.Parameter
                if (RefIdEq(param) in visited_params):
                    continue
                visited_params.add(RefIdEq(param))
                full_param_name = (('%s.%s' % (module_name, param_name)) if module_name else param_name)
                custom_include = None
                if custom_include_check:
                    # NOTE(review): this passes the short param_name under the keyword
                    # full_param_name -- looks wrong (full_param_name seems intended); verify.
                    custom_include = custom_include_check(module=module, rf_module=rf_module, full_param_name=param_name, param=param)
                if (custom_include is not None):
                    assert isinstance(custom_include, bool), 'weight_decay_custom_include_check did not return bool'
                    if custom_include:
                        wd_params.add(full_param_name)
                    else:
                        no_wd_params.add(full_param_name)
                elif (param_name.endswith('bias') or isinstance(module, blacklist_wd_modules) or isinstance(rf_module, blacklist_wd_modules)):
                    # Biases and blacklisted module types (e.g. LayerNorm, Embedding) get no weight decay.
                    no_wd_params.add(full_param_name)
                else:
                    wd_params.add(full_param_name)
        param_dict = {pn: p for (pn, p) in self.network.named_parameters()}
        optim_groups = [{'params': [param_dict[pn] for pn in sorted(list(wd_params))], 'weight_decay': weight_decay}, {'params': [param_dict[pn] for pn in sorted(list(no_wd_params))], 'weight_decay': 0.0}]
        return optim_groups
|
def _wrap_user_blacklist_wd_modules(mods: Sequence[Union[str, Type[rf.Module], Type[torch.nn.Module]]]) -> Tuple[type, ...]:
    """
    Resolve the user-specified weight-decay module blacklist into a tuple of types.

    Entries may be types directly, or strings such as "torch.nn.LayerNorm" / "rf.LayerNorm".
    """
    assert isinstance(mods, (list, tuple)), f'invalid blacklist_weight_decay_modules {mods!r}'
    resolved = []
    for entry in mods:
        if isinstance(entry, str):
            # String entries are restricted to the torch./rf. namespaces and eval'ed deliberately
            # (config-provided code).
            assert entry.startswith('torch.') or entry.startswith('rf.'), f'invalid blacklist_weight_decay_modules {mods!r}'
            entry = eval(entry)
        assert issubclass(entry, (rf.Module, torch.nn.Module)), f'invalid blacklist_weight_decay_modules {mods!r}'
        resolved.append(entry)
    return tuple(resolved)
|
def gradient_noise_(params: Iterable[torch.nn.Parameter], std: float):
    """
    Add noise to the gradients in-place, drawn from a normal distribution
    truncated to [-2*std, 2*std]. Params without a gradient are left untouched.

    :param params: parameters whose ``.grad`` should be perturbed
    :param std: standard deviation of the noise
    """
    lower, upper = -2 * std, 2 * std
    for p in params:
        if p.grad is None or not p.requires_grad:
            continue
        noise = torch.empty_like(p.grad)
        torch.nn.init.trunc_normal_(noise, std=std, a=lower, b=upper)
        p.grad += noise
|
def print_available_devices(*, file: Optional[TextIO]=None):
    """
    Print available devices, GPU (CUDA or other), etc.

    :param file: where to print to. stdout by default
    """
    if file is None:
        file = sys.stdout
    cuda_visible_devs = None
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        print('CUDA_VISIBLE_DEVICES is set to %r.' % os.environ['CUDA_VISIBLE_DEVICES'], file=file)
        cuda_visible_devs = dict(enumerate([int(d) for d in os.environ['CUDA_VISIBLE_DEVICES'].split(',') if d]))
    elif torch.cuda.is_available():
        print('CUDA_VISIBLE_DEVICES is not set.', file=file)
    if torch.cuda.is_available():
        # Bugfix: this and the prints marked below previously ignored `file` and went to stdout.
        print('Available CUDA devices:', file=file)
        count = torch.cuda.device_count()
        if cuda_visible_devs is not None and len(cuda_visible_devs) != count:
            print(f'(Mismatch between CUDA device count {count} and CUDA_VISIBLE_DEVICES {cuda_visible_devs} count {len(cuda_visible_devs)}?)', file=file)
        for i in range(count):
            print(f' {(i + 1)}/{count}: cuda:{i}', file=file)
            props = torch.cuda.get_device_properties(i)
            print(f' name: {props.name}', file=file)
            print(f' total_memory: {human_bytes_size(props.total_memory)}', file=file)
            print(f' capability: {props.major}.{props.minor}', file=file)
            if cuda_visible_devs is not None:
                if len(cuda_visible_devs) == count:
                    # Map the logical CUDA index back to the physical device index.
                    dev_idx_s = cuda_visible_devs[i]
                else:
                    dev_idx_s = '?'
            else:
                dev_idx_s = i
            print(f' device_index: {dev_idx_s}', file=file)
        if not count:
            print(' (None)', file=file)  # bugfix: was missing file=file
    else:
        print('(CUDA not available)', file=file)  # bugfix: was missing file=file
|
def print_using_cuda_device_report(dev: Union[str, torch.device], *, file: Optional[TextIO]=None):
    """
    Theano and TensorFlow print sth like: Using gpu device 2: GeForce GTX 980 (...)
    Print in a similar format so that some scripts which grep our stdout work just as before.
    """
    if file is None:
        file = sys.stdout
    if isinstance(dev, str):
        dev = torch.device(dev)
    assert dev.type == 'cuda', f'expected CUDA device, got {dev}'
    # Resolve the logical CUDA device index.
    idx = dev.index if dev.index is not None else torch.cuda.current_device()
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        visible = dict(enumerate(int(d) for d in os.environ['CUDA_VISIBLE_DEVICES'].split(',') if d))
        # Report the physical index when known; otherwise an out-of-range placeholder.
        idx_s = visible.get(idx, torch.cuda.device_count() + idx)
    else:
        idx_s = idx
    print(f'Using gpu device {idx_s}:', torch.cuda.get_device_name(idx), file=file)
|
def diagnose_no_gpu() -> List[str]:
    """
    Diagnose why we have no GPU.
    Print to stdout, but also prepare summary strings.

    :return: summary strings
    """
    res = []
    res: List[str]
    print('CUDA_VISIBLE_DEVICES:', os.environ.get('CUDA_VISIBLE_DEVICES', None))
    print('LD_LIBRARY_PATH:', os.environ.get('LD_LIBRARY_PATH', None))
    try:
        # Force CUDA initialization to surface driver/runtime errors directly.
        torch.cuda.init()
    except Exception as exc:
        print('torch.cuda.init() failed:', exc)
        # Best-effort: print the full traceback, then keep diagnosing.
        better_exchook(*sys.exc_info(), debugshell=False)
        res.append(f'torch.cuda.init() failed: {type(exc).__name__} {exc}')
    try:
        # Check whether the NVIDIA driver tools see any GPU at all.
        subprocess.check_call(['nvidia-smi'])
    except Exception as exc:
        print('nvidia-smi failed:', exc)
        better_exchook(*sys.exc_info(), debugshell=False)
        res.append(f'nvidia-smi failed')
    return res
|
class _ScaledGradient(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor, scale: float) -> torch.Tensor:
ctx.scale = scale
return x
@staticmethod
def backward(ctx, grad_output):
return ((grad_output * ctx.scale), None)
|
def scaled_gradient(x: torch.Tensor, scale: float) -> torch.Tensor:
    """
    :param x: input tensor, passed through unchanged
    :param scale: gradient scale factor
    :return: just x, however, in backward pass, the gradient is scaled by the given factor
    """
    return _ScaledGradient.apply(x, scale)
|
class _ScaledGradientExt(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor, scale: Union[(float, torch.Tensor)]=1.0, shift: Optional[Union[(float, torch.Tensor)]]=None, scale_shift_by_sum_over_axis: Optional[int]=None):
ctx.scale = scale
ctx.shift = shift
ctx.scale_shift_by_sum_over_axis = scale_shift_by_sum_over_axis
return x
@staticmethod
def backward(ctx, grad):
grad_out = grad
if (isinstance(ctx.scale, torch.Tensor) or (ctx.scale != 1)):
grad_out = (grad_out * ctx.scale)
if ((ctx.shift is not None) and (isinstance(ctx.shift, torch.Tensor) or (ctx.shift != 0))):
if (ctx.scale_shift_by_sum_over_axis is not None):
m = torch.sum(torch.abs(grad), dim=ctx.scale_shift_by_sum_over_axis, keepdim=True)
grad_out = (grad_out + (ctx.shift * m))
else:
grad_out = (grad_out + ctx.shift)
return (grad_out, None, None, None)
|
def scaled_gradient_ext(x: torch.Tensor, *, scale: Union[(float, torch.Tensor)]=1.0, shift: Optional[Union[(float, torch.Tensor)]]=None, scale_shift_by_sum_over_axis: Optional[int]=None):
    """
    :param x: input tensor, passed through unchanged
    :param scale: will scale gradient by this value
    :param shift: will shift gradient by this value
    :param scale_shift_by_sum_over_axis: if given, will scale and shift by the sum over the given axis
    :return: just x, but gradient in backward pass will be transformed accordingly
    """
    return _ScaledGradientExt.apply(x, scale, shift, scale_shift_by_sum_over_axis)
|
def parse_py_statement(line):
    """
    Parse Python statement into tokens.
    Note that this is incomplete.
    It should be simple and fast and just barely enough for what we need here.

    Reference:
    https://docs.python.org/3/reference/lexical_analysis.html

    :param str line:
    :return: yields (type, value)
    :rtype: typing.Iterator[typing.Tuple[str,str]]
    """
    spaces = ' \t\n'
    ops = '.,;:+-*/%&!=|(){}[]^<>'
    # Lexer states.
    st_top, st_dq_str, st_sq_str, st_ident, st_dq_esc, st_sq_esc, st_comment = range(7)
    state = st_top
    token = ''
    pos = 0

    def _escape_char(ch):
        # Only \n and \t are interpreted; every other escaped char is taken literally.
        return {'n': '\n', 't': '\t'}.get(ch, ch)

    while pos < len(line):
        ch = line[pos]
        pos += 1
        if state == st_top:
            if ch in spaces:
                pass
            elif ch in ops:
                yield 'op', ch
            elif ch == '#':
                state = st_comment
            elif ch == '"':
                state = st_dq_str
            elif ch == "'":
                state = st_sq_str
            else:
                token = ch
                state = st_ident
        elif state == st_dq_str:
            if ch == '\\':
                state = st_dq_esc
            elif ch == '"':
                yield 'str', token
                token = ''
                state = st_top
            else:
                token += ch
        elif state == st_sq_str:
            if ch == '\\':
                state = st_sq_esc
            elif ch == "'":
                yield 'str', token
                token = ''
                state = st_top
            else:
                token += ch
        elif state == st_ident:
            if ch in spaces + ops + '#':
                yield 'id', token
                token = ''
                state = st_top
                pos -= 1  # re-examine this char in the top-level state
            elif ch == '"':
                token = ''
                state = st_dq_str
            elif ch == "'":
                token = ''
                state = st_sq_str
            else:
                token += ch
        elif state == st_dq_esc:
            token += _escape_char(ch)
            state = st_dq_str
        elif state == st_sq_esc:
            token += _escape_char(ch)
            state = st_sq_str
        elif state == st_comment:
            token += ch
    # Flush whatever is still pending at end-of-line.
    if state == st_ident:
        yield 'id', token
    elif state == st_comment:
        yield 'comment', token
|
def parse_py_statements(source_code):
    """
    Tokenize multi-line source code, line by line, via :func:`parse_py_statement`.

    :param str source_code:
    :return: via :func:`parse_py_statement`
    :rtype: typing.Iterator[typing.Tuple[str,str]]
    """
    for source_line in source_code.splitlines():
        yield from parse_py_statement(source_line)
|
def grep_full_py_identifiers(tokens):
    """
    Collect dotted identifiers (e.g. ``mod.attr.func``) from a token stream,
    skipping keywords, empty names, and anything starting with a digit or dot.

    :param typing.Iterable[(str,str)] tokens:
    :rtype: typing.Iterator[str]
    """
    global py_keywords
    token_list = list(tokens)
    idx = 0
    while idx < len(token_list):
        token_type, token = token_list[idx]
        idx += 1
        if token_type != 'id':
            continue
        # Greedily join "<id> . <id>" sequences into one dotted name.
        while idx + 1 < len(token_list) and token_list[idx] == ('op', '.') and token_list[idx + 1][0] == 'id':
            token += '.' + token_list[idx + 1][1]
            idx += 2
        if not token:
            continue
        if token in py_keywords:
            continue
        if token[0] in '.0123456789':
            continue
        yield token
|
def set_linecache(filename, source):
    """
    The :mod:`linecache` module has some cache of the source code for the current source.
    Sometimes it fails to find the source of some files.
    We can explicitly set the source for some filename.

    :param str filename:
    :param str source:
    :return: nothing
    """
    import linecache

    lines = [line + '\n' for line in source.splitlines()]
    # Cache entry format: (size, mtime, lines, fullname); size/mtime unknown here.
    linecache.cache[filename] = (None, None, lines, filename)
|
def simple_debug_shell(globals, locals):
    """
    Minimal interactive read-eval-print loop over the given namespaces.

    :param dict[str] globals:
    :param dict[str] locals:
    :return: nothing
    """
    try:
        # Enables line editing / history in the input prompt, if available.
        import readline
    except ImportError:
        pass
    compile_string_fn = '<simple_debug_shell input>'
    while True:
        try:
            # NOTE(review): raw_input is the Python-2 name; presumably aliased elsewhere
            # in this module for Python 3 (otherwise this raises NameError) -- verify.
            s = raw_input('> ')
        except (KeyboardInterrupt, EOFError):
            print(('breaked debug shell: ' + sys.exc_info()[0].__name__))
            break
        if (s.strip() == ''):
            continue
        try:
            c = compile(s, compile_string_fn, 'single')
        except Exception as e:
            print(('%s : %s in %r' % (e.__class__.__name__, str(e), s)))
        else:
            # Register the source in linecache so tracebacks can show the entered line.
            set_linecache(compile_string_fn, s)
            try:
                ret = eval(c, globals, locals)
            except (KeyboardInterrupt, SystemExit):
                print(('debug shell exit: ' + sys.exc_info()[0].__name__))
                break
            except Exception:
                print(('Error executing %r' % s))
                better_exchook(*sys.exc_info(), autodebugshell=False)
            else:
                try:
                    if (ret is not None):
                        print(ret)
                except Exception:
                    # repr/str of the result itself may raise; report but keep the shell alive.
                    print(('Error printing return value of %r' % s))
                    better_exchook(*sys.exc_info(), autodebugshell=False)
|
def debug_shell(user_ns, user_global_ns, traceback=None, execWrapper=None):
    """
    Spawns some interactive shell. Tries to use IPython if available.
    Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`.

    :param dict[str] user_ns:
    :param dict[str] user_global_ns:
    :param traceback:
    :param execWrapper:
    :return: nothing
    """
    ipshell = None
    try:
        import IPython

        have_ipython = True
    except ImportError:
        have_ipython = False
    if ((not ipshell) and traceback and have_ipython):
        # Preferred: IPython Pdb post-mortem on the given traceback.
        try:
            from IPython.core.debugger import Pdb
            from IPython.terminal.debugger import TerminalPdb
            from IPython.terminal.ipapp import TerminalIPythonApp

            ipapp = TerminalIPythonApp.instance()
            ipapp.interact = False  # Avoid output (banner, prints)
            ipapp.initialize(argv=[])
            def_colors = ipapp.shell.colors
            pdb_obj = TerminalPdb(def_colors)
            pdb_obj.botframe = None  # not sure. exception otherwise at quit

            def ipshell():
                """
                Run the IPython shell.
                """
                pdb_obj.interaction(None, traceback=traceback)

        except Exception:
            print('IPython Pdb exception:')
            better_exchook(*sys.exc_info(), autodebugshell=False, file=sys.stdout)
    if ((not ipshell) and have_ipython):
        # Fallback: plain embedded IPython shell over the given namespaces.
        try:
            import IPython
            import IPython.terminal.embed

            class DummyMod(object):
                """Dummy module"""

            module = DummyMod()
            module.__dict__ = user_global_ns
            module.__name__ = '_DummyMod'
            if ('__name__' not in user_ns):
                user_ns = user_ns.copy()
                user_ns['__name__'] = '_DummyUserNsMod'
            ipshell = IPython.terminal.embed.InteractiveShellEmbed.instance(user_ns=user_ns, user_module=module)
        except Exception:
            print('IPython not available:')
            better_exchook(*sys.exc_info(), autodebugshell=False, file=sys.stdout)
        else:
            if execWrapper:
                # Route all code execution through the caller-provided wrapper.
                old = ipshell.run_code
                ipshell.run_code = (lambda code: execWrapper((lambda : old(code))))
    if ipshell:
        ipshell()
    else:
        # No IPython at all: plain pdb post-mortem, or our own minimal REPL.
        print('Use simple pdb debug shell:')
        if traceback:
            import pdb

            pdb.post_mortem(traceback)
        else:
            simple_debug_shell(user_global_ns, user_ns)
|
def output_limit():
    """
    :return: maximum number of chars to use for a single object representation
    :rtype: int
    """
    return 300
|
def fallback_findfile(filename):
    """
    :param str filename:
    :return: try to find the full filename, e.g. in modules, etc
    :rtype: str|None
    """
    candidates = []
    for mod in list(sys.modules.values()):
        if mod and getattr(mod, "__file__", None) and filename in mod.__file__:
            candidates.append(mod)
    if not candidates:
        return None
    alt_fn = candidates[0].__file__
    # Map a compiled file (*.pyc / *.pyo) back to its source file.
    if alt_fn[-4:-1] == ".py":
        alt_fn = alt_fn[:-1]
    if not os.path.exists(alt_fn) and alt_fn.startswith("./"):
        # Relative filename: try to resolve it against the original working dir.
        resolved = _cur_pwd + alt_fn[1:]
        if os.path.exists(resolved):
            return resolved
        # Otherwise, try relative to the main script or this module.
        for mod_name in ["__main__", "better_exchook"]:
            if hasattr(sys.modules.get(mod_name), "__file__"):
                resolved = os.path.dirname(sys.modules[mod_name].__file__) + alt_fn[1:]
                if os.path.exists(resolved):
                    return resolved
    return alt_fn
|
def is_source_code_missing_brackets(source_code, prioritize_missing_open=False):
    """
    We check whether this source code snippet (e.g. one line) is complete/even
    w.r.t. opening/closing brackets.

    :param str source_code:
    :param bool prioritize_missing_open: once we found any missing open bracket, directly return -1
    :return: 1 if missing_close, -1 if missing_open, 0 otherwise.
        I.e. whether there are missing open/close brackets.
        E.g. this would mean that you might want to include the prev/next source code line as well in the stack trace.
    :rtype: int
    """
    open_brackets = "[{("
    close_brackets = "]})"
    bracket_stack = [-1]  # stack of open-bracket indices; -1 is the sentinel bottom
    counters = [0] * len(open_brackets)
    missing_open = False
    for token_type, token in parse_py_statements(source_code):
        if token_type != "op":
            continue
        if token in open_brackets:
            idx = open_brackets.index(token)
            counters[idx] += 1
            bracket_stack.append(idx)
        elif token in close_brackets:
            idx = close_brackets.index(token)
            if bracket_stack[-1] == idx:
                counters[idx] -= 1
                bracket_stack.pop()
            else:
                # Close bracket without a matching open bracket on the stack.
                if prioritize_missing_open:
                    return -1
                missing_open = True
    if any(count != 0 for count in counters):
        return 1  # some open bracket was never closed
    if missing_open:
        return -1
    return 0
|
def is_source_code_missing_open_brackets(source_code):
    """
    We check whether this source code snippet (e.g. one line) is complete/even
    w.r.t. opening/closing brackets.

    :param str source_code:
    :return: whether there are missing open brackets.
        E.g. this would mean that you might want to include the previous source code line as well in the stack trace.
    :rtype: bool
    """
    missing = is_source_code_missing_brackets(source_code, prioritize_missing_open=True)
    return missing < 0
|
def get_source_code(filename, lineno, module_globals=None):
    """
    :param str filename:
    :param int lineno:
    :param dict[str]|None module_globals:
    :return: source code of that line (including newline), possibly extended by
        neighboring lines until the bracket structure is balanced
    :rtype: str
    """
    import linecache
    linecache.checkcache(filename)
    source_code = linecache.getline(filename, lineno, module_globals)
    start_line = end_line = lineno
    all_lines = None
    while True:
        bracket_state = is_source_code_missing_brackets(source_code)
        if bracket_state == 0:
            break
        if not all_lines:
            # Load the whole file lazily, only once we need to extend the snippet.
            all_lines = linecache.getlines(filename, module_globals)
        if bracket_state < 0:
            # Missing open bracket: extend the snippet upwards.
            start_line -= 1
            if start_line < 1:
                break
        else:
            # Missing close bracket: extend the snippet downwards.
            end_line += 1
            if end_line > len(all_lines):
                break
        source_code = "".join(all_lines[start_line - 1:end_line])
    return source_code
|
def str_visible_len(s):
    """
    :param str s:
    :return: len without escape chars
    :rtype: int
    """
    import re
    # Strip ANSI/terminal escape sequences, then count the remaining chars.
    visible = re.sub('[\x1b\x9b][\\[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-PRZcf-nqry=><]', '', s)
    return len(visible)
|
def add_indent_lines(prefix, s):
    """
    :param str prefix:
    :param str s:
    :return: s with prefix prepended to the first line,
        and matching (visible-length) spaces prepended to all further lines
    :rtype: str
    """
    if not s:
        return prefix
    # Use the visible length, so ANSI escape codes in the prefix do not shift the indent.
    indent = " " * str_visible_len(prefix)
    lines = s.splitlines(True)
    out = [prefix + lines[0]]
    out.extend(indent + line for line in lines[1:])
    return "".join(out)
|
def get_indent_prefix(s):
    """
    :param str s:
    :return: the indent (leading whitespace) of s
    :rtype: str
    """
    stripped_len = len(s.lstrip())
    return s[:len(s) - stripped_len]
|
def get_same_indent_prefix(lines):
    """
    :param list[] lines:
    :return: the indent prefix shared by all lines, "" if the first line has none,
        or None if not all lines share the first line's indent
    :rtype: str|None
    """
    if not lines:
        return ""
    prefix = get_indent_prefix(lines[0])
    if not prefix:
        return ""
    for line in lines:
        if not line.startswith(prefix):
            return None
    return prefix
|
def remove_indent_lines(s):
    """
    :param str s:
    :return: remove as much indentation as possible
    :rtype: str
    """
    if not s:
        return ""
    lines = s.splitlines(True)
    prefix = get_same_indent_prefix(lines)
    if prefix is None:
        # No common indent -> strip all leading whitespace from every line.
        return "".join(line.lstrip() for line in lines)
    cut = len(prefix)
    return "".join(line[cut:] for line in lines)
|
def replace_tab_indent(s, replace="    "):
    """
    :param str s: string with tabs
    :param str replace: e.g. 4 spaces
    :rtype: str
    """
    indent = get_indent_prefix(s)
    rest = s[len(indent):]
    # Only replace tabs in the indentation, not in the content.
    return indent.replace("\t", replace) + rest
|
def replace_tab_indents(s, replace="    "):
    """
    :param str s: multi-line string with tabs
    :param str replace: e.g. 4 spaces
    :rtype: str
    """
    return "".join(replace_tab_indent(line, replace) for line in s.splitlines(True))
|
def to_bool(s, fallback=None):
    """
    :param str|bool s: str to be converted to bool, e.g. "1", "0", "true", "false".
        A bool is passed through unchanged.
    :param T fallback: if s is empty/None or not recognized as a bool
    :return: boolean value, or fallback
    :rtype: bool|T
    """
    if isinstance(s, bool):
        # Generalization: allow an already-parsed bool.
        # (Previously, False would have fallen into the empty-check below
        #  and returned the fallback, and True would have crashed on .lower().)
        return s
    if not s:
        return fallback
    s = s.lower()
    if s in ("1", "true", "yes", "y"):
        return True
    if s in ("0", "false", "no", "n"):
        return False
    return fallback
|
class Color():
    """
    Helper functions provided to perform terminal coloring.
    """
    # Maps color name -> index. The ANSI foreground code is 30 + index.
    ColorIdxTable = {k: i for (i, k) in enumerate(['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'])}
    @classmethod
    def get_global_color_enabled(cls):
        """
        :return: whether color output is enabled globally, via the ``CLICOLOR`` env var.
            Enabled by default.
        :rtype: bool
        """
        return to_bool(os.environ.get('CLICOLOR', ''), fallback=True)
    @classmethod
    def is_dark_terminal_background(cls):
        """
        :return: Whether we have a dark Terminal background color, or None if unknown.
            We currently just check the env var COLORFGBG,
            which some terminals define like "<foreground-color>:<background-color>",
            and if <background-color> in {0,1,2,3,4,5,6,8}, then we have some dark background.
            There are many other complex heuristics we could do here, which work in some cases but not in others.
            See e.g. `here <https://stackoverflow.com/questions/2507337/terminals-background-color>`__.
            But instead of adding more heuristics, we think that explicitly setting COLORFGBG would be the best thing,
            in case it's not like you want it.
        :rtype: bool|None
        """
        if os.environ.get('COLORFGBG', None):
            parts = os.environ['COLORFGBG'].split(';')
            try:
                last_number = int(parts[(- 1)])
                # 0-6 and 8 are the dark background colors.
                if ((0 <= last_number <= 6) or (last_number == 8)):
                    return True
                else:
                    return False
            except ValueError:
                # Background part is not an integer -> unknown.
                pass
        return None
    def __init__(self, enable=None):
        """
        :param bool|None enable: whether to enable colored output.
            If None, uses :func:`get_global_color_enabled`.
        """
        if (enable is None):
            enable = self.get_global_color_enabled()
        self.enable = enable
        self._dark_terminal_background = self.is_dark_terminal_background()
        # fg_colors: colors to use (by role/importance), picked to be readable
        # on the detected (dark or light) terminal background.
        if self._dark_terminal_background:
            self.fg_colors = ['yellow', 'red', 'cyan', 'white', 'magenta']
        else:
            self.fg_colors = ['blue', 'red', 'cyan', 'white', 'magenta']
    def color(self, s, color=None, bold=False):
        """
        :param str s:
        :param str|None color: sth in self.ColorIdxTable
        :param bool bold:
        :return: s optionally wrapped with ansi escape codes
        :rtype: str
        """
        if (not self.enable):
            return s
        code_seq = []
        if color:
            code_seq += [(30 + self.ColorIdxTable[color])]
        if bold:
            code_seq += [1]
        if (not code_seq):
            return s
        start = ('\x1b[%sm' % ';'.join(map(str, code_seq)))
        end = '\x1b[0m'
        # Move surrounding spaces outside of the escape codes,
        # so that only the non-space content is wrapped.
        while (s[:1] == ' '):
            start = (' ' + start)
            s = s[1:]
        while (s[(- 1):] == ' '):
            end += ' '
            s = s[:(- 1)]
        return ((start + s) + end)
    def __call__(self, *args, **kwargs):
        """Shortcut for :func:`color`."""
        return self.color(*args, **kwargs)
    def py_syntax_highlight(self, s):
        """
        Adds ANSI color escape codes to the given Python source code string,
        via a small handwritten tokenizer / state machine.

        :param str s:
        :rtype: str
        """
        if (not self.enable):
            return s
        # States: 0 = default, 1 = inside "..." string, 2 = inside '...' string,
        # 3 = inside identifier/number token, 4/5 = after backslash in state 1/2,
        # 6 = inside a comment (until end of line).
        state = 0
        spaces = ' \t\n'
        ops = '.,;:+-*/%&!=|(){}[]^<>'
        i = 0
        cur_token = ''
        # color_args: maps string position -> kwargs for self.color,
        # valid for the span until the next recorded position.
        color_args = {0: {}, len(s): {}}
        def finish_identifier():
            """
            Reset color to standard for current identifier.
            """
            if (cur_token in py_keywords):
                color_args[max([k for k in color_args.keys() if (k < i)])] = {'color': self.fg_colors[0]}
        while (i < len(s)):
            c = s[i]
            i += 1
            if (c == '\n'):
                # A newline always terminates the current token/comment/string coloring.
                if (state == 3):
                    finish_identifier()
                color_args[i] = {}
                state = 0
            elif (state == 0):
                if (c in spaces):
                    pass
                elif (c in ops):
                    color_args[(i - 1)] = {'color': self.fg_colors[0]}
                    color_args[i] = {}
                elif (c == '#'):
                    color_args[(i - 1)] = {'color': self.fg_colors[3]}
                    state = 6
                elif (c == '"'):
                    color_args[(i - 1)] = {'color': self.fg_colors[2]}
                    state = 1
                elif (c == "'"):
                    color_args[(i - 1)] = {'color': self.fg_colors[2]}
                    state = 2
                else:
                    # Start of an identifier/number token.
                    cur_token = c
                    color_args[(i - 1)] = {}
                    state = 3
            elif (state == 1):
                if (c == '\\'):
                    state = 4
                elif (c == '"'):
                    color_args[i] = {}
                    state = 0
            elif (state == 2):
                if (c == '\\'):
                    state = 5
                elif (c == "'"):
                    color_args[i] = {}
                    state = 0
            elif (state == 3):
                if (c in ((spaces + ops) + '#"\'')):
                    # Token ended; reprocess this char in the default state.
                    finish_identifier()
                    color_args[i] = {}
                    state = 0
                    i -= 1
                else:
                    cur_token += c
            elif (state == 4):
                # Escaped char inside a "..." string.
                state = 1
            elif (state == 5):
                # Escaped char inside a '...' string.
                state = 2
            elif (state == 6):
                # Inside a comment; keep the comment color until newline.
                pass
        if (state == 3):
            finish_identifier()
        # Render the string, colored span by span.
        out = ''
        i = 0
        while (i < len(s)):
            j = min([k for k in color_args.keys() if (k > i)])
            out += self.color(s[i:j], **color_args[i])
            i = j
        return out
|
class DomTerm():
    """
    DomTerm (https://github.com/PerBothner/DomTerm/) is a terminal emulator
    with many extended escape codes, such as folding text away, or even generic HTML.
    We can make use of some of these features (currently just folding text).
    """
    # Cached result of is_domterm(). None means not checked yet.
    _is_domterm = None
    @classmethod
    def is_domterm(cls):
        """
        :return: whether we are inside DomTerm
        :rtype: bool
        """
        import os
        if (cls._is_domterm is not None):
            return cls._is_domterm
        # DomTerm sets the DOMTERM env var.
        if (not os.environ.get('DOMTERM')):
            cls._is_domterm = False
            return False
        cls._is_domterm = True
        return True
    @contextlib.contextmanager
    def logical_block(self, file=sys.stdout):
        """
        Wraps the output of the context in a DomTerm logical block.

        :param io.TextIOBase|io.StringIO file:
        """
        file.write('\x1b]110\x07')
        (yield)
        file.write('\x1b]111\x07')
    @contextlib.contextmanager
    def hide_button_span(self, mode, file=sys.stdout):
        """
        Makes the output of the context hideable via a preceding hide-button.

        :param int mode: 1 or 2
        :param io.TextIOBase|io.StringIO file:
        """
        file.write(('\x1b[83;%iu' % mode))
        (yield)
        file.write('\x1b[83;0u')
    def indentation(self, file=sys.stdout):
        """
        Writes a DomTerm indentation marker.

        :param io.TextIOBase|io.StringIO file:
        """
        file.write('\x1b]114;"│"\x07')
    def hide_button(self, file=sys.stdout):
        """
        Writes a hide/show toggle button (▶/▼).

        :param io.TextIOBase|io.StringIO file:
        """
        file.write('\x1b[16u▶▼\x1b[17u')
    @contextlib.contextmanager
    def _temp_replace_attrib(self, obj, attr, new_value):
        # Temporarily sets obj.attr = new_value; restores the old value when leaving the context.
        old_value = getattr(obj, attr)
        setattr(obj, attr, new_value)
        (yield old_value)
        setattr(obj, attr, old_value)
    @contextlib.contextmanager
    def fold_text_stream(self, prefix, postfix='', hidden_stream=None, **kwargs):
        """
        Collects everything written to ``hidden_stream`` during the context
        and outputs it folded away (via :func:`fold_text`).

        :param str prefix: always visible
        :param str postfix: always visible, right after.
        :param io.TextIOBase|io.StringIO hidden_stream: sys.stdout by default.
            If this is sys.stdout, it will replace that stream,
            and collect the data during the context (in the `with` block).
        """
        import io
        if (hidden_stream is None):
            hidden_stream = sys.stdout
        assert isinstance(hidden_stream, io.IOBase)
        assert (hidden_stream is sys.stdout), 'currently not supported otherwise'
        hidden_buf = io.StringIO()
        with self._temp_replace_attrib(sys, 'stdout', hidden_buf):
            (yield)
        self.fold_text(prefix=prefix, postfix=postfix, hidden=hidden_buf.getvalue(), **kwargs)
    def fold_text(self, prefix, hidden, postfix='', file=None, align=0):
        """
        Outputs ``prefix``, then ``hidden`` folded away (only in DomTerm;
        otherwise printed indented), then ``postfix``.

        :param str prefix: always visible
        :param str hidden: hidden
            If this is sys.stdout, it will replace that stream,
            and collect the data during the context (in the `with` block).
        :param str postfix: always visible, right after. "" by default.
        :param io.TextIOBase|io.StringIO file: sys.stdout by default.
        :param int align: remove this number of initial chars from hidden
        """
        if (file is None):
            file = sys.stdout
        if ('\n' in hidden):
            # Normalize multi-line hidden text: start with a newline,
            # and move a trailing newline into the postfix.
            if (hidden[:1] != '\n'):
                hidden = ('\n' + hidden)
            if (hidden[(- 1):] == '\n'):
                hidden = hidden[:(- 1)]
                postfix += '\n'
        if self.is_domterm():
            with self.logical_block(file=file):
                self.indentation(file=file)
                self.hide_button(file=file)
                file.write(prefix)
                if prefix.endswith('\x1b[0m'):
                    # Extra space after a color-reset, to separate from the hidden span.
                    file.write(' ')
                with self.hide_button_span(2, file=file):
                    hidden_ls = hidden.split('\n')
                    hidden_ls = [s[align:] for s in hidden_ls]
                    hidden = '\x1b]118\x07'.join(hidden_ls)
                    file.write(hidden)
        else:
            # Not in DomTerm: no folding possible, print the hidden text indented.
            file.write(prefix)
            file.write(hidden.replace('\n', '\n '))
        file.write(postfix)
        file.flush()
    def fold_text_string(self, prefix, hidden, **kwargs):
        """
        :param str prefix:
        :param str hidden:
        :param kwargs: passed to :func:`fold_text`
        :rtype: str
        """
        import io
        output_buf = io.StringIO()
        self.fold_text(prefix=prefix, hidden=hidden, file=output_buf, **kwargs)
        return output_buf.getvalue()
|
def is_at_exit():
    """
    Some heuristics to figure out whether this is called at a stage where the Python interpreter is shutting down.

    :return: whether the Python interpreter is currently in the process of shutting down
    :rtype: bool
    """
    if _threading_main_thread is None:
        return False
    if not hasattr(threading, "main_thread"):
        # Cannot check further (threading.main_thread is Python 3.4+); assume shutdown.
        return True
    if threading.main_thread() != _threading_main_thread:
        # The main thread changed -> the interpreter state is being torn down.
        return True
    # Main thread no longer alive -> we are shutting down.
    return not _threading_main_thread.is_alive()
|
class _OutputLinesCollector():
    """
    Collects output lines (used by :func:`format_tb` / :func:`better_exchook`),
    with optional DomTerm text folding and pretty-printing helpers.
    """
    def __init__(self, color):
        """
        :param Color color:
        """
        self.color = color
        self.lines = []  # collected output lines
        # DomTerm instance if we run inside DomTerm, else None (no folding available).
        self.dom_term = (DomTerm() if DomTerm.is_domterm() else None)
    def __call__(self, s1, s2=None, **kwargs):
        """
        Adds to self.lines.
        This strange function signature is for historical reasons.

        :param str s1:
        :param str|None s2: if given, appended to s1 with matching indentation
        :param kwargs: passed to self.color
        """
        if kwargs:
            s1 = self.color(s1, **kwargs)
        if (s2 is not None):
            s1 = add_indent_lines(s1, s2)
        self.lines.append((s1 + '\n'))
    @contextlib.contextmanager
    def fold_text_ctx(self, line):
        """
        Folds text, via :class:`DomTerm`, if available.
        Notes that this temporarily overwrites self.lines.

        :param str line: always visible
        """
        if (not self.dom_term):
            # No DomTerm: just emit the line; the context output is appended normally.
            self.__call__(line)
            (yield)
            return
        # Swap in a fresh list, so we can capture everything written inside the context.
        (self.lines, old_lines) = ([], self.lines)
        (yield)
        (self.lines, new_lines) = (old_lines, self.lines)
        hidden_text = ''.join(new_lines)
        import io
        output_buf = io.StringIO()
        prefix = ''
        # Move the leading spaces of the line out, used for alignment of the hidden text.
        while (line[:1] == ' '):
            prefix += ' '
            line = line[1:]
        self.dom_term.fold_text(line, hidden=hidden_text, file=output_buf, align=len(prefix))
        output_text = (prefix[1:] + output_buf.getvalue())
        self.lines.append(output_text)
    def _pp_extra_info(self, obj, depth_limit=3):
        """
        Extra info about the object, e.g. its length, and recursively of its first item.

        :param typing.Any|typing.Sized obj:
        :param int depth_limit: limits the recursion into nested items
        :rtype: str
        """
        if hasattr(obj, 'shape'):
            # Array-like objects (with a shape attr): assume the repr already covers it.
            return ''
        s = []
        if hasattr(obj, '__len__'):
            try:
                # For short builtin containers, the repr shows everything already.
                if ((type(obj) in (str, unicode, list, tuple, dict)) and (len(obj) <= 5)):
                    pass
                else:
                    s += [('len = ' + str(obj.__len__()))]
            except Exception:
                pass
        if ((depth_limit > 0) and hasattr(obj, '__getitem__')):
            try:
                if (type(obj) in (str, unicode)):
                    pass
                else:
                    subobj = obj.__getitem__(0)
                    extra_info = self._pp_extra_info(subobj, (depth_limit - 1))
                    if (extra_info != ''):
                        s += [(('_[0]: {' + extra_info) + '}')]
            except Exception:
                pass
        return ', '.join(s)
    def pretty_print(self, obj):
        """
        Pretty repr of the object: syntax-highlighted, truncated/folded if too long,
        with extra info (e.g. length) appended.

        :param object obj:
        :rtype: str
        """
        s = repr(obj)
        limit = output_limit()
        if (len(s) > limit):
            if self.dom_term:
                # In DomTerm we can keep the full repr, just folded away.
                s = self.color.py_syntax_highlight(s)
                s = self.dom_term.fold_text_string('', s)
            else:
                # Otherwise truncate; highlight first, then mark the truncation.
                s = s[:(limit - 3)]
                s = self.color.py_syntax_highlight(s)
                s += '...'
        else:
            s = self.color.py_syntax_highlight(s)
        extra_info = self._pp_extra_info(obj)
        if (extra_info != ''):
            s += (', ' + self.color.py_syntax_highlight(extra_info))
        return s
|
def format_tb(tb=None, limit=None, allLocals=None, allGlobals=None, withTitle=False, with_color=None, with_vars=None):
    """
    Formats a traceback (or frame, or stack summary) into a list of output lines.

    :param types.TracebackType|types.FrameType|StackSummary tb: traceback. if None, will use sys._getframe
    :param int|None limit: limit the traceback to this number of frames. by default, will look at sys.tracebacklimit
    :param dict[str]|None allLocals: if set, will update it with all locals from all frames
    :param dict[str]|None allGlobals: if set, will update it with all globals from all frames
    :param bool withTitle: print a "Traceback ..." title line first
    :param bool|None with_color: output with ANSI escape codes for color
    :param bool with_vars: will print var content which are referenced in the source code line. by default enabled.
    :return: list of strings (line-based)
    :rtype: list[str]
    """
    color = Color(enable=with_color)
    output = _OutputLinesCollector(color=color)
    def format_filename(s):
        """
        :param str s: filename; the basename is printed bold
        :rtype: str
        """
        base = os.path.basename(s)
        return ((color(('"' + s[:(- len(base))]), color.fg_colors[2]) + color(base, color.fg_colors[2], bold=True)) + color('"', color.fg_colors[2]))
    format_py_obj = output.pretty_print
    if (tb is None):
        try:
            tb = get_current_frame()
            assert tb
        except Exception:
            output(color('format_tb: tb is None and sys._getframe() failed', color.fg_colors[1], bold=True))
            return output.lines
    def is_stack_summary(_tb):
        """
        :param StackSummary|object _tb:
        :rtype: bool
        """
        return isinstance(_tb, StackSummary)
    isframe = inspect.isframe
    if withTitle:
        # A frame/stack summary iterates most recent call first; a traceback the other way around.
        if (isframe(tb) or is_stack_summary(tb)):
            output(color('Traceback (most recent call first):', color.fg_colors[0]))
        else:
            output(color('Traceback (most recent call last):', color.fg_colors[0]))
    if ((with_vars is None) and is_at_exit()):
        # While exiting, module state might already be partially torn down; skip the vars.
        with_vars = False
        if withTitle:
            output('(Exclude vars because we are exiting.)')
    if (with_vars is None):
        if any([(f.f_code.co_name == '__del__') for f in iter_traceback()]):
            # On a GC/__del__ stack, accessing vars could misbehave; skip them.
            with_vars = False
            if withTitle:
                output('(Exclude vars because we are on a GC stack.)')
    if (with_vars is None):
        with_vars = True
    try:
        if (limit is None):
            if hasattr(sys, 'tracebacklimit'):
                limit = sys.tracebacklimit
        n = 0
        _tb = tb
        class NotFound(Exception):
            """
            Identifier not found.
            """
        def _resolve_identifier(namespace, keys):
            """
            :param dict[str] namespace:
            :param tuple[str] keys:
            :return: namespace[name[0]][name[1]]...
            :raises NotFound: if keys[0] is not in the namespace
            """
            if (keys[0] not in namespace):
                raise NotFound()
            obj = namespace[keys[0]]
            for part in keys[1:]:
                obj = getattr(obj, part)
            return obj
        def _try_set(old, prefix, func):
            """
            Tries func() and returns it with the prefix prepended, unless `old` is already set.

            :param None|str old: kept if not None
            :param str prefix:
            :param func: may raise NotFound (-> keep old) or anything else (-> error string)
            :return: old
            """
            if (old is not None):
                return old
            try:
                return add_indent_lines(prefix, func())
            except NotFound:
                return old
            except Exception as e:
                return ((((prefix + '!') + e.__class__.__name__) + ': ') + str(e))
        while ((_tb is not None) and ((limit is None) or (n < limit))):
            # Get the frame object, from whichever format _tb has (frame, stack summary, traceback).
            if isframe(_tb):
                f = _tb
            elif is_stack_summary(_tb):
                if isinstance(_tb[0], ExtendedFrameSummary):
                    f = _tb[0].tb_frame
                else:
                    f = DummyFrame.from_frame_summary(_tb[0])
            else:
                f = _tb.tb_frame
            if (allLocals is not None):
                allLocals.update(f.f_locals)
            if (allGlobals is not None):
                allGlobals.update(f.f_globals)
            # Get the line number, from whichever format _tb has.
            if hasattr(_tb, 'tb_lineno'):
                lineno = _tb.tb_lineno
            elif is_stack_summary(_tb):
                lineno = _tb[0].lineno
            else:
                lineno = f.f_lineno
            co = f.f_code
            filename = co.co_filename
            if (not os.path.isfile(filename)):
                alt_fn = fallback_findfile(filename)
                if alt_fn:
                    filename = alt_fn
            name = get_func_str_from_code_object(co)
            file_descr = ''.join([' ', color('File ', color.fg_colors[0], bold=True), format_filename(filename), ', ', color('line ', color.fg_colors[0]), color(('%d' % lineno), color.fg_colors[4]), ', ', color('in ', color.fg_colors[0]), name])
            with output.fold_text_ctx(file_descr):
                source_code = get_source_code(filename, lineno, f.f_globals)
                if source_code:
                    source_code = remove_indent_lines(replace_tab_indents(source_code)).rstrip()
                    output(' line: ', color.py_syntax_highlight(source_code), color=color.fg_colors[0])
                    if (not with_vars):
                        pass
                    elif (isinstance(f, DummyFrame) and (not f.have_vars_available)):
                        # DummyFrame without any namespaces: nothing to show.
                        pass
                    else:
                        with output.fold_text_ctx(color(' locals:', color.fg_colors[0])):
                            already_printed_locals = set()
                            # Show the value of every identifier (incl. dotted chains) used in the source line.
                            for token_str in grep_full_py_identifiers(parse_py_statement(source_code)):
                                splitted_token = tuple(token_str.split('.'))
                                for token in [splitted_token[0:i] for i in range(1, (len(splitted_token) + 1))]:
                                    if (token in already_printed_locals):
                                        continue
                                    token_value = None
                                    # Resolve in locals, then globals, then builtins; first hit wins.
                                    token_value = _try_set(token_value, color('<local> ', color.fg_colors[0]), (lambda : format_py_obj(_resolve_identifier(f.f_locals, token))))
                                    token_value = _try_set(token_value, color('<global> ', color.fg_colors[0]), (lambda : format_py_obj(_resolve_identifier(f.f_globals, token))))
                                    token_value = _try_set(token_value, color('<builtin> ', color.fg_colors[0]), (lambda : format_py_obj(_resolve_identifier(f.f_builtins, token))))
                                    token_value = (token_value or color('<not found>', color.fg_colors[0]))
                                    prefix = ((' %s ' % color('.', color.fg_colors[0], bold=True).join(token)) + color('= ', color.fg_colors[0], bold=True))
                                    output(prefix, token_value)
                                    already_printed_locals.add(token)
                            if (len(already_printed_locals) == 0):
                                output(color(' no locals', color.fg_colors[0]))
                else:
                    output(color(' -- code not available --', color.fg_colors[0]))
            # Advance _tb to the next entry, depending on its format.
            if isframe(_tb):
                _tb = _tb.f_back
            elif is_stack_summary(_tb):
                _tb = StackSummary.from_list(_tb[1:])
                if (not _tb):
                    _tb = None
            else:
                _tb = _tb.tb_next
            n += 1
    except Exception:
        # Never fail in the exception formatter itself; report the inner error instead.
        output(color('ERROR: cannot get more detailed exception info because:', color.fg_colors[1], bold=True))
        import traceback
        for line in traceback.format_exc().split('\n'):
            output((' ' + line))
    return output.lines
|
def print_tb(tb, file=None, **kwargs):
    """
    Prints the traceback, formatted via :func:`format_tb`.

    :param types.TracebackType|types.FrameType|StackSummary tb:
    :param io.TextIOBase|io.StringIO|typing.TextIO|None file: stderr by default
    :return: nothing, prints to ``file``
    """
    if file is None:
        file = sys.stderr
    file.writelines(format_tb(tb=tb, **kwargs))
    file.flush()
|
def better_exchook(etype, value, tb, debugshell=False, autodebugshell=True, file=None, with_color=None, with_preamble=True):
    """
    Replacement for sys.excepthook.

    :param etype: exception type
    :param value: exception value
    :param tb: traceback
    :param bool debugshell: spawn a debug shell at the context of the exception
    :param bool autodebugshell: if env DEBUG is an integer != 0, it will spawn a debug shell
    :param io.TextIOBase|io.StringIO|typing.TextIO|None file: output stream where we will print the traceback
        and exception information. stderr by default.
    :param bool|None with_color: whether to use ANSI escape codes for colored output
    :param bool with_preamble: print a short preamble for the exception
    """
    if (file is None):
        file = sys.stderr
    color = Color(enable=with_color)
    output = _OutputLinesCollector(color=color)
    rec_args = dict(autodebugshell=False, file=file, with_color=with_color, with_preamble=with_preamble)
    # Handle chained exceptions (PEP 3134) recursively, like the stdlib traceback output.
    if getattr(value, '__cause__', None):
        better_exchook(type(value.__cause__), value.__cause__, value.__cause__.__traceback__, **rec_args)
        output('')
        output('The above exception was the direct cause of the following exception:')
        output('')
    elif getattr(value, '__context__', None):
        better_exchook(type(value.__context__), value.__context__, value.__context__.__traceback__, **rec_args)
        output('')
        output('During handling of the above exception, another exception occurred:')
        output('')
    def format_filename(s):
        """
        :param str s: filename; the basename is printed bold
        :rtype: str
        """
        base = os.path.basename(s)
        return ((color(('"' + s[:(- len(base))]), color.fg_colors[2]) + color(base, color.fg_colors[2], bold=True)) + color('"', color.fg_colors[2]))
    if with_preamble:
        output(color('EXCEPTION', color.fg_colors[1], bold=True))
    (all_locals, all_globals) = ({}, {})
    if (tb is not None):
        output.lines.extend(format_tb(tb=tb, allLocals=all_locals, allGlobals=all_globals, withTitle=True, with_color=color.enable))
    else:
        output(color('better_exchook: traceback unknown', color.fg_colors[1]))
    if isinstance(value, SyntaxError):
        # SyntaxError carries the erroneous location itself (not part of the traceback);
        # report the line and point a caret at the offset.
        filename = value.filename
        file_descr = ''.join([' ', color('File ', color.fg_colors[0], bold=True), format_filename(filename), ', ', color('line ', color.fg_colors[0]), color(('%d' % value.lineno), color.fg_colors[4])])
        with output.fold_text_ctx(file_descr):
            if (not os.path.isfile(filename)):
                alt_fn = fallback_findfile(filename)
                if alt_fn:
                    output((color(" -- couldn't find file, trying this instead: ", color.fg_colors[0]) + format_filename(alt_fn)))
                    filename = alt_fn
            source_code = get_source_code(filename, value.lineno)
            if source_code:
                # Strip common indentation, so the caret lands on the right column.
                source_code = replace_tab_indents(source_code)
                lines = source_code.splitlines(True)
                indent_prefix = get_same_indent_prefix(lines)
                if (indent_prefix is None):
                    indent_prefix = ''
                source_code = ''.join([line[len(indent_prefix):] for line in lines])
                source_code = source_code.rstrip()
                prefix = ' line: '
                output(prefix, color.py_syntax_highlight(source_code), color=color.fg_colors[0])
                output(((' ' * (((len(prefix) + value.offset) - len(indent_prefix)) - 1)) + '^'), color=color.fg_colors[4])
    import types
    def _some_str(value):
        """
        :param object value: str() of it, guarded against broken __str__
        :rtype: str
        """
        try:
            return str(value)
        except Exception:
            return ('<unprintable %s object>' % type(value).__name__)
    def _format_final_exc_line(etype, value):
        # Formats the final "ExceptionType: message" line, like the stdlib traceback module.
        value_str = _some_str(value)
        if ((value is None) or (not value_str)):
            line = color(('%s' % etype), color.fg_colors[1])
        else:
            line = (color(('%s' % etype), color.fg_colors[1]) + (': %s' % (value_str,)))
        return line
    # etype may already be a string / instance (old-style usage); otherwise use its __name__.
    if (isinstance(etype, BaseException) or (hasattr(types, 'InstanceType') and isinstance(etype, types.InstanceType)) or (etype is None) or (type(etype) is str)):
        output(_format_final_exc_line(etype, value))
    else:
        output(_format_final_exc_line(etype.__name__, value))
    for line in output.lines:
        file.write(line)
    file.flush()
    if autodebugshell:
        try:
            # DEBUG env var not set / not an int -> keep debugshell as passed in.
            debugshell = (int(os.environ['DEBUG']) != 0)
        except Exception:
            pass
    if debugshell:
        # NOTE(review): this appends to output.lines AFTER the lines were already
        # written to `file` above, so this banner is never printed — verify intent.
        output('---------- DEBUG SHELL -----------')
        debug_shell(user_ns=all_locals, user_global_ns=all_globals, traceback=tb)
|
def dump_all_thread_tracebacks(exclude_thread_ids=None, file=None):
    """
    Prints the traceback of all threads.

    :param set[int]|list[int]|None exclude_thread_ids: threads to exclude
    :param io.TextIOBase|io.StringIO|typing.TextIO|None file: output stream, stdout by default
    """
    if exclude_thread_ids is None:
        exclude_thread_ids = []
    if not file:
        file = sys.stdout
    import threading

    if hasattr(sys, "_current_frames"):
        print("", file=file)
        threads = {t.ident: t for t in threading.enumerate()}
        # noinspection PyProtectedMember
        for tid, stack in sys._current_frames().items():
            if tid in exclude_thread_ids:
                continue
            # Skip threads which are unknown to the threading module
            # (e.g. started directly via the C API).
            if tid not in threads:
                continue
            tags = []
            thread = threads.get(tid)
            if thread:
                assert isinstance(thread, threading.Thread)
                # Fix: use current_thread(); currentThread() is a deprecated alias
                # (DeprecationWarning since Python 3.10).
                if thread is threading.current_thread():
                    tags += ["current"]
                # noinspection PyProtectedMember,PyUnresolvedReferences
                if isinstance(thread, threading._MainThread):
                    tags += ["main"]
                tags += [str(thread)]
            else:
                tags += ["unknown with id %i" % tid]
            print("Thread %s:" % ", ".join(tags), file=file)
            print_tb(stack, file=file)
            print("", file=file)
        print("That were all threads.", file=file)
    else:
        print("Does not have sys._current_frames, cannot get thread tracebacks.", file=file)
|
def get_current_frame():
    """
    :return: current frame object (excluding this function call)
    :rtype: types.FrameType

    Uses sys._getframe if available, otherwise some trickery with sys.exc_info and a dummy exception.
    """
    if hasattr(sys, "_getframe"):
        # Fast path (CPython): directly fetch the caller frame.
        return sys._getframe(1)
    # Fallback: raise and catch a dummy exception to get a traceback,
    # whose frame's f_back is our caller.
    try:
        raise ZeroDivisionError
    except ZeroDivisionError:
        exc_tb = sys.exc_info()[2]
        return exc_tb.tb_frame.f_back
|
def get_func_str_from_code_object(co, frame=None):
    """
    :param types.CodeType co:
    :param types.FrameType|None frame: if given, might provide a faster way to get the function name
    :return: co.co_name as fallback, but maybe sth better like the full func name if possible
    :rtype: str
    """
    func = get_func_from_code_object(co, frame=frame)
    if not func:
        return co.co_name
    if hasattr(func, "__qualname__"):
        # Qualified name includes the enclosing class, which is nicer.
        return func.__qualname__
    return str(func)
|
def get_func_from_code_object(co, frame=None):
    """
    :param types.CodeType co:
    :param types.FrameType|None frame: if given, might provide a faster way to get the function name
    :return: function, such that ``func.__code__ is co``, or None
    :rtype: types.FunctionType

    This is CPython specific (to some degree; it uses the `gc` module to find references).
    Inspired from:
    https://stackoverflow.com/questions/12787108/getting-the-python-function-for-a-code-object
    https://stackoverflow.com/questions/54656758/get-function-object-from-stack-frame-object
    """
    import gc
    import types
    assert isinstance(co, (types.CodeType, DummyFrame))
    code_attr = "__code__" if PY3 else "func_code"
    def _matches(func):
        # A candidate matches if its code object is `co`.
        # For a DummyFrame, there is no real code object to compare, so accept any candidate.
        if not func:
            return False
        return getattr(func, code_attr, None) is co or isinstance(co, DummyFrame)
    if frame:
        # Fast path for methods: look up the name on type(self).
        func_name = frame.f_code.co_name
        if "self" in frame.f_locals:
            candidate = getattr(frame.f_locals["self"].__class__, func_name, None)
            if _matches(candidate):
                return candidate
    # Next: look up the name in the module which corresponds to the code filename.
    try:
        candidate = getattr(_get_loaded_module_from_filename(co.co_filename), co.co_name, None)
    except ImportError:
        candidate = None
    if _matches(candidate):
        return candidate
    if isinstance(co, DummyFrame):
        # gc referrers only work on a real code object.
        return None
    # Slow path: scan all objects which refer to the code object.
    for referrer in gc.get_referrers(co):
        if getattr(referrer, code_attr, None) is co:
            return referrer
    return None
|
def _get_loaded_module_from_filename(filename):
    """
    Like inspect.getmodule but faster, using a filename -> module-name cache.

    :param str filename:
    :rtype: types.ModuleType|Any|None
    """
    if filename.endswith((".pyc", ".pyo")):
        # Normalize compiled filenames to the source filename.
        filename = filename[:-1]
    cached_modname = _loaded_module_from_filename_cache.get(filename)
    if cached_modname is not None:
        return sys.modules.get(cached_modname)
    # Cache miss: (re)build the cache from all currently loaded modules.
    for modname, module in sys.modules.copy().items():
        mod_file = getattr(module, "__file__", None)
        if not mod_file:
            continue
        if mod_file.endswith((".pyc", ".pyo")):
            mod_file = mod_file[:-1]
        _loaded_module_from_filename_cache[mod_file] = modname
    if filename in _loaded_module_from_filename_cache:
        return sys.modules.get(_loaded_module_from_filename_cache[filename])
    return None
|
def iter_traceback(tb=None, enforce_most_recent_call_first=False):
    """
    Iterates a traceback of various formats:
      - traceback (types.TracebackType)
      - frame object (types.FrameType)
      - stack summary (traceback.StackSummary)

    :param types.TracebackType|types.FrameType|StackSummary|None tb: traceback. if None, will use sys._getframe
    :param bool enforce_most_recent_call_first:
        Frame or stack summery: most recent call first (top of the stack is the first entry in the result)
        Traceback: most recent call last
        If True, and we get traceback, will unroll and reverse, such that we have always the most recent call first.
    :return: yields the frames (types.FrameType)
    :rtype: list[types.FrameType|DummyFrame]
    """
    if (tb is None):
        tb = get_current_frame()
    def is_stack_summary(_tb):
        """
        :param StackSummary|object _tb:
        :rtype: bool
        """
        return isinstance(_tb, StackSummary)
    is_frame = inspect.isframe
    is_traceback = inspect.istraceback
    assert (is_traceback(tb) or is_frame(tb) or is_stack_summary(tb))
    if (is_traceback(tb) and enforce_most_recent_call_first):
        # A traceback iterates most recent call last; unroll and yield in reverse.
        frames = list(iter_traceback(tb))
        for frame in frames[::(- 1)]:
            (yield frame)
        return
    _tb = tb
    while (_tb is not None):
        # Get the frame object, from whichever format _tb has.
        if is_frame(_tb):
            frame = _tb
        elif is_stack_summary(_tb):
            if isinstance(_tb[0], ExtendedFrameSummary):
                # ExtendedFrameSummary keeps a reference to the real frame.
                frame = _tb[0].tb_frame
            else:
                # Plain FrameSummary: wrap it into a dummy frame object.
                frame = DummyFrame.from_frame_summary(_tb[0])
        else:
            frame = _tb.tb_frame
        (yield frame)
        # Advance _tb to the next entry, depending on its format.
        if is_frame(_tb):
            _tb = _tb.f_back
        elif is_stack_summary(_tb):
            _tb = StackSummary.from_list(_tb[1:])
            if (not _tb):
                _tb = None
        else:
            _tb = _tb.tb_next
|
class ExtendedFrameSummary(FrameSummary):
    """
    A :class:`FrameSummary` which additionally keeps a reference
    to the original frame object, in ``self.tb_frame``.
    """

    def __init__(self, frame, **kwargs):
        """
        :param frame: the frame object to keep
        :param kwargs: passed on to :class:`FrameSummary`
        """
        super().__init__(**kwargs)
        self.tb_frame = frame
|
class DummyFrame:
    """
    This object exposes the attributes of both a code object and a frame object,
    and is intended to be used as a dummy replacement where no real frame is available.
    """

    @classmethod
    def from_frame_summary(cls, f):
        """
        :param FrameSummary f:
        :rtype: DummyFrame
        """
        return cls(filename=f.filename, lineno=f.lineno, name=f.name, f_locals=f.locals)

    def __init__(self, filename, lineno, name, f_locals=None, f_globals=None, f_builtins=None):
        """
        :param str filename:
        :param int lineno:
        :param str name: function/code name
        :param dict|None f_locals:
        :param dict|None f_globals:
        :param dict|None f_builtins:
        """
        # Expose the line number under all common frame/traceback attribute names.
        self.lineno = self.tb_lineno = self.f_lineno = lineno
        # This object doubles as its own code object.
        self.f_code = self
        self.filename = self.co_filename = filename
        self.name = self.co_name = name
        self.f_locals = f_locals or {}
        self.f_globals = f_globals or {}
        self.f_builtins = f_builtins or {}
        # True iff any of the variable scopes was explicitly provided.
        self.have_vars_available = f_locals is not None or f_globals is not None or f_builtins is not None
|
def _StackSummary_extract(frame_gen, limit=None, lookup_lines=True, capture_locals=False):
    """
    Replacement for :func:`StackSummary.extract`.

    Create a StackSummary from a traceback or stack object.
    Very simplified copy of the original StackSummary.extract().
    We want always to capture locals, that is why we overwrite it.
    Additionally, we also capture the frame.
    This is a bit hacky and also not like this is originally intended (to not keep refs).

    :param frame_gen: A generator that yields (frame, lineno) tuples to
        include in the stack.
    :param limit: ignored in this simplified variant (all frames are included)
    :param lookup_lines: ignored; line lookup is always deferred
    :param capture_locals: ignored; the whole frame (incl. locals) is kept instead
    """
    summary = StackSummary()
    for frame, lineno in frame_gen:
        code = frame.f_code
        summary.append(ExtendedFrameSummary(
            frame=frame, filename=code.co_filename, lineno=lineno, name=code.co_name, lookup_line=False))
    return summary
|
def install():
    """
    Replaces sys.excepthook by our better_exchook,
    i.e. all uncaught exceptions will be reported via better_exchook.
    """
    sys.excepthook = better_exchook
|
def replace_traceback_format_tb():
    """
    Replaces these functions from the traceback module by our own:

    - traceback.format_tb
    - traceback.StackSummary.format
    - traceback.StackSummary.extract

    Note that this kind of monkey patching might not be safe under all circumstances
    and is not officially supported by Python.
    """
    import traceback
    traceback.format_tb = format_tb
    # Guard in case this runs on a Python where traceback.StackSummary does not exist.
    if hasattr(traceback, 'StackSummary'):
        traceback.StackSummary.format = format_tb
        traceback.StackSummary.extract = _StackSummary_extract
|
class StandardBytePairEncoder:
    """
    Code is partly taken from subword-nmt/apply_bpe.py.
    Author: Rico Sennrich, code under MIT license.

    Use operations learned with learn_bpe.py to encode a new text.
    The text will not be smaller, but use only a fixed vocabulary, with rare words
    encoded as variable-length sequences of subword units.

    Reference:
    Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
    Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
    """

    def __init__(self, bpe_codes_file, labels=None):
        """
        :param str bpe_codes_file: codes file
        :param list[str]|None labels: vocab
        """
        self.labels = labels
        self._load(bpe_codes_file)
        self._bpe_encode_cache = {}  # word (str) -> encoded BPE symbols (tuple[str])
        self._bpe_separator = BpeMergeSymbol

    # Class-level cache shared across instances: filename -> (version, codes, codes_reverse).
    _file_cache = {}

    def _load(self, bpe_codes_file):
        """
        Load BPE codes from file (or reuse the class-level cache).

        :param str bpe_codes_file: codes file
        """
        if bpe_codes_file in self._file_cache:
            (self._bpe_file_version, self._bpe_codes, self._bpe_codes_reverse) = self._file_cache[bpe_codes_file]
            return
        # Read the file only once, and close it properly.
        # (The old code opened it twice and leaked both file handles.)
        with open(bpe_codes_file, "rb") as f:
            lines = f.read().decode("utf8").splitlines()
        first_line = lines[0] if lines else ""
        if first_line.startswith("#version:"):
            # E.g. "#version: 0.2" -> (0, 2). Strip trailing ".0" parts first.
            self._bpe_file_version = tuple(
                int(x) for x in re.sub('(\\.0+)*$', '', first_line.split()[-1]).split("."))
        else:
            self._bpe_file_version = (0, 1)
        # Note: a potential "#version:" header line is kept in the codes here,
        # consistent with the original behavior. It can never match a real symbol bigram.
        bpe_codes = [tuple(line.split()) for line in lines]
        # Map each merge pair to its rank (line index).
        # Iterate reversed so that in case of duplicates, the earliest occurrence (higher priority) wins.
        self._bpe_codes = {code: i for (i, code) in reversed(list(enumerate(bpe_codes)))}
        # Reverse map: merged symbol -> the pair it was merged from.
        self._bpe_codes_reverse = {pair[0] + pair[1]: pair for pair in self._bpe_codes.keys()}
        self._file_cache[bpe_codes_file] = (self._bpe_file_version, self._bpe_codes, self._bpe_codes_reverse)

    @staticmethod
    def _get_pairs(word):
        """
        :param tuple[str] word: represented as tuple of symbols (symbols being variable-length strings)
        :return: set of symbol pairs (bigrams) in a word
        :rtype: set[(str,str)]
        """
        return set(zip(word[:-1], word[1:]))

    def _encode_word(self, orig):
        """
        Encode word based on list of BPE merge operations, which are applied consecutively.

        :param str orig: the word
        :rtype: tuple[str]
        """
        if orig in self._bpe_encode_cache:
            return self._bpe_encode_cache[orig]
        if self._bpe_file_version == (0, 1):
            # Old format: end-of-word is a separate symbol.
            word = tuple(orig) + ("</w>",)
        elif self._bpe_file_version == (0, 2):
            # Newer format: end-of-word marker is attached to the last char.
            word = tuple(orig[:-1]) + (orig[-1] + "</w>",)
        else:
            raise NotImplementedError("unsupported BPE codes file version %r" % (self._bpe_file_version,))
        pairs = self._get_pairs(word)
        if not pairs:
            # Single-symbol word, nothing to merge. (Returned as-is, like the original code.)
            return orig
        while True:
            # Apply the highest-priority (lowest-rank) merge which occurs in the word.
            bigram = min(pairs, key=lambda pair: self._bpe_codes.get(pair, float("inf")))
            if bigram not in self._bpe_codes:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # No further occurrence of `first`; keep the rest unchanged.
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = self._get_pairs(word)
        # Strip, or re-attach, the end-of-word marker.
        if word[-1] == "</w>":
            word = word[:-1]
        elif word[-1].endswith("</w>"):
            word = word[:-1] + (word[-1].replace("</w>", ""),)
        if self.labels:
            word = self._check_vocab_and_split(word, self._bpe_codes_reverse, self.labels, self._bpe_separator)
        self._bpe_encode_cache[orig] = word
        return word

    def _check_vocab_and_split(self, orig, bpe_codes, vocab, separator):
        """
        Check for each segment in word if it is in-vocabulary,
        and segment OOV segments into smaller units by reversing the BPE merge operations.

        :param tuple[str]|list[str] orig: BPE symbols of the word
        :param dict[str,(str,str)] bpe_codes: reverse codes, merged symbol -> pair
        :param list[str] vocab:
        :param str separator:
        :rtype: list[str]
        """
        out = []
        # All segments except the last one would get the separator appended in the output.
        for segment in orig[:-1]:
            if (segment + separator) in vocab:
                out.append(segment)
            else:
                out.extend(self._recursive_split(segment, bpe_codes, vocab, separator, False))
        segment = orig[-1]
        if segment in vocab:
            out.append(segment)
        else:
            out.extend(self._recursive_split(segment, bpe_codes, vocab, separator, True))
        return out

    def _recursive_split(self, segment, bpe_codes, vocab, separator, final=False):
        """
        Recursively split segment into smaller units (by reversing BPE merges)
        until all units are either in-vocabulary, or cannot be split further.

        :param str segment:
        :param dict[str,(str,str)] bpe_codes: reverse codes, merged symbol -> pair
        :param list[str] vocab:
        :param str separator:
        :param bool final: whether this is the last segment of the word
        :return: yields the (possibly further split) units
        """
        try:
            if final:
                (left, right) = bpe_codes[segment + "</w>"]
                right = right[:-4]  # cut off the "</w>"
            else:
                (left, right) = bpe_codes[segment]
        except (KeyError, ValueError):
            # KeyError: unknown symbol, cannot split further.
            # ValueError: malformed codes entry (not a pair). Was a broad `except Exception` before.
            yield segment
            return
        if (left + separator) in vocab:
            yield left
        else:
            for item in self._recursive_split(left, bpe_codes, vocab, separator, False):
                yield item
        if (final and right in vocab) or (not final and (right + separator) in vocab):
            yield right
        else:
            for item in self._recursive_split(right, bpe_codes, vocab, separator, final):
                yield item

    def segment_sentence(self, sentence):
        """
        Segment single sentence (whitespace-tokenized string) with BPE encoding.

        :param str sentence:
        :rtype: list[str]
        """
        output = []
        # Special handling for category markers like "$CAT { ... }": passed through unencoded.
        found_category = False
        skip_category = False
        for word in sentence.split():
            if word[0] == "$" and len(word) > 1:
                found_category = True
                output.append(word)
            elif found_category is True and word[0] == "{":
                skip_category = True
                output.append(word)
            elif skip_category is True and word[0] != "}":
                output.append(word)
            else:
                found_category = False
                skip_category = False
                new_word = self._encode_word(word)
                output.extend(item + self._bpe_separator for item in new_word[:-1])
                output.append(new_word[-1])
        return output
|
class PrefixTree:
    """
    Prefix tree / trie.
    This class represents both a single node and the tree.
    """

    def __init__(self, prefix='', root=None):
        """
        :param str prefix: the symbol string from the root up to (including) this node
        :param PrefixTree|None root: the root node; None iff this is the root itself
        """
        self.prefix = prefix
        self.arcs = {}  # arc label (single char, or BpeMergeSymbol) -> child PrefixTree
        self.finished = False  # whether some complete symbol ends at this node
        self.bpe_finished = False  # whether a symbol "<this prefix><BpeMergeSymbol>" exists
        self.is_root = not root
        self.root = root

    def add(self, postfix, root=None):
        """
        Adds the postfix to the tree, creating child nodes as needed.

        :param str postfix:
        :param None|PrefixTree root:
        :return: the leaf node corresponding to the added postfix
        :rtype: PrefixTree
        """
        if not root:
            if self.is_root:
                root = self
            else:
                assert self.root
                root = self.root
        if postfix == BpeMergeSymbol:
            # Consume the whole merge symbol as one single arc.
            arc, remaining = postfix, ''
        else:
            arc, remaining = postfix[:1], postfix[1:]
        child = self.arcs.get(arc)
        if child is None:
            child = PrefixTree(root=root, prefix=self.prefix + arc)
            self.arcs[arc] = child
        if arc == BpeMergeSymbol and not remaining:
            self.bpe_finished = True
        if remaining:
            return child.add(remaining, root=root)
        child.finished = True
        return child
|
class Hyp:
    """
    Represents a single hypothesis in the char-sync search.
    """

    def __init__(self, bpe_sym_history, cur_node):
        """
        :param list[str] bpe_sym_history: BPE symbols emitted so far
        :param PrefixTree cur_node: current position in the prefix tree
        """
        self.bpe_sym_history = bpe_sym_history
        self.cur_node = cur_node
|
class CharSyncSearch:
    """
    Character-synchronous search over a BPE prefix tree:
    enumerates all possible BPE symbol sequences for a given word.
    Covers the search hyps and the search itself.
    """

    def __init__(self, bpe, word, word_pos=0):
        """
        :param PrefixTree bpe: root of the prefix tree
        :param str word:
        :param int word_pos: position in the word where the search starts
        """
        self.bpe = bpe
        self.word = word
        self.word_pos = word_pos
        # Start with a single hyp at the tree root with empty history.
        self.hyps = [Hyp(bpe_sym_history=[], cur_node=bpe)]
        self.final_bpe_seqs = None  # set by _get_finished()

    def _get_finished(self):
        """Collects all finished hyps at the word end into self.final_bpe_seqs."""
        assert self.word_pos == len(self.word)
        self.final_bpe_seqs = [
            hyp.bpe_sym_history + [hyp.cur_node.prefix]
            for hyp in self.hyps
            if hyp.cur_node.finished]

    def _expand(self):
        """Advances all hyps by one character of the word."""
        assert self.word_pos < len(self.word)
        char = self.word[self.word_pos]
        expanded = []
        for hyp in self.hyps:
            if hyp.cur_node.bpe_finished:
                # Option 1: emit the current node as a BPE symbol (with merge symbol)
                # and continue with this char from the root.
                root_child = self.bpe.arcs.get(char)
                if root_child:
                    expanded.append(Hyp(
                        bpe_sym_history=hyp.bpe_sym_history + [hyp.cur_node.prefix + BpeMergeSymbol],
                        cur_node=root_child))
            # Option 2: continue within the current symbol.
            child = hyp.cur_node.arcs.get(char)
            if child:
                expanded.append(Hyp(bpe_sym_history=hyp.bpe_sym_history, cur_node=child))
        self.hyps = expanded

    def search(self):
        """
        :return: collection of possible BPE symbol seqs
        :rtype: list[list[str]]
        """
        while self.word_pos < len(self.word):
            self._expand()
            self.word_pos += 1
        self._get_finished()
        return self.final_bpe_seqs
|
class HypInPos:
    """
    Represents a single hypothesis in the depth-first search,
    including its position in the word.
    """

    def __init__(self, bpe_sym_history, cur_node, pos):
        """
        :param list[str] bpe_sym_history: BPE symbols emitted so far
        :param PrefixTree cur_node: current position in the prefix tree
        :param int pos: current position in the word
        """
        self.bpe_sym_history = bpe_sym_history
        self.cur_node = cur_node
        self.pos = pos
|
class DepthFirstSearch:
    """
    Depth-first search through the BPE prefix tree for one word,
    returning the first complete BPE split which is found.
    """

    def __init__(self, bpe, word, sampler=None):
        """
        :param PrefixTree bpe: root of the prefix tree
        :param str word:
        :param (()->bool)|None sampler: if it returns True for a step,
            the expansion order of that step is reversed (more breadth-first-like behavior)
        """
        self.bpe = bpe
        self.word = word
        self.sampler = sampler
        self.hyps = []  # open hypotheses, used as a stack
        self.final_bpe_seq = None  # set once a complete split is found
        self._add_hyp(HypInPos(bpe_sym_history=[], cur_node=bpe, pos=0))

    def _add_hyp(self, hyp):
        """
        Registers the hyp as final (if at the word end and finished),
        or otherwise pushes it onto the stack.

        :param HypInPos hyp:
        """
        if hyp.pos >= len(self.word):
            if hyp.cur_node.finished:
                self.final_bpe_seq = hyp.bpe_sym_history + [hyp.cur_node.prefix]
        else:
            self.hyps.append(hyp)

    def _expand(self):
        """Pops the most recent hyp and expands it by one character."""
        hyp = self.hyps.pop(-1)
        char = self.word[hyp.pos]
        successors = []
        if hyp.cur_node.bpe_finished:
            # Option 1: emit the current node as a BPE symbol (with merge symbol)
            # and continue with this char from the root.
            root_child = self.bpe.arcs.get(char)
            if root_child:
                successors.append(HypInPos(
                    bpe_sym_history=hyp.bpe_sym_history + [hyp.cur_node.prefix + BpeMergeSymbol],
                    cur_node=root_child, pos=hyp.pos + 1))
        # Option 2: continue within the current symbol.
        child = hyp.cur_node.arcs.get(char)
        if child:
            successors.append(HypInPos(bpe_sym_history=hyp.bpe_sym_history, cur_node=child, pos=hyp.pos + 1))
        if self.sampler and self.sampler():
            successors.reverse()
        for new_hyp in successors:
            self._add_hyp(new_hyp)

    def search(self):
        """
        :return: BPE symbol seq if one is found
        :rtype: list[str]|None
        """
        while self.hyps and self.final_bpe_seq is None:
            self._expand()
        return self.final_bpe_seq
|
class SamplingBytePairEncoder:
    """
    Will randomly sample from any possible BPE split.
    """

    def __init__(self, labels, breadth_prob, rnd, unknown_label=None):
        """
        :param list[str] labels: vocab
        :param float breadth_prob: 1.0 will lead to breadth-first search, 0.0 to depth-first search.
            other values are stochastic.
        :param numpy.random.RandomState rnd:
        :param str|None unknown_label:
        """
        self.labels = labels
        self.unknown_label = unknown_label
        if unknown_label:
            assert unknown_label in self.labels
        self.breadth_prob = breadth_prob
        self.rnd = rnd
        # Build the prefix tree over the whole vocab once.
        tree = PrefixTree()
        for bpe_sym in labels:
            tree.add(bpe_sym)
        self._bpe_prefix_tree = tree

    def _sampler(self):
        """Stochastic decision whether this search step behaves breadth-first."""
        return self.rnd.random_sample() <= self.breadth_prob

    def get_bpe_split_for_word(self, word):
        """
        :param str word:
        :rtype: list[str]|None
        """
        search = DepthFirstSearch(bpe=self._bpe_prefix_tree, word=word, sampler=self._sampler)
        return search.search()

    def segment_sentence(self, sentence):
        """
        Segment single sentence (whitespace-tokenized string) with BPE encoding.

        :param str sentence:
        :rtype: list[str]
        """
        output = []
        for word in sentence.split():
            bpe_sym_seq = self.get_bpe_split_for_word(word)
            if bpe_sym_seq is None:
                if self.unknown_label:
                    output.append(self.unknown_label)
                    continue
                raise Exception('no BPE-split for word %r' % word)
            output.extend(bpe_sym_seq)
        return output
|
def _demo():
    """
    Interactive command-line demo for the BPE search/sampling in this module.
    Either encodes ``--input`` directly (optionally listing all possible splits via ``--all``),
    or reads lines from stdin and prints their sampled BPE segmentation.
    """
    import sys
    import os
    # Make sure the RETURNN root dir is in sys.path, so that `import returnn` works below.
    my_dir = os.path.dirname(os.path.abspath(__file__))
    root_dir = os.path.dirname(os.path.dirname(my_dir))
    assert os.path.exists(('%s/returnn/__init__.py' % root_dir))
    sys.path.insert(0, root_dir)
    from returnn.util import better_exchook
    better_exchook.install()
    import argparse
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--vocab', required=True)
    arg_parser.add_argument('--unk')
    arg_parser.add_argument('--input', help='text. if not given, will read from stdin')
    arg_parser.add_argument('--seed', type=int, default=0)
    arg_parser.add_argument('--all', action='store_true')
    arg_parser.add_argument('--breadth-prob', type=float, default=0.0, help='1.0 will lead to breadth-first search, 0.0 to depth-first search. other values are stochastic.')
    args = arg_parser.parse_args()
    from returnn.datasets.util.vocabulary import Vocabulary
    vocab = Vocabulary(vocab_file=args.vocab, unknown_label=None)
    rnd = numpy.random.RandomState(args.seed)
    if args.input:
        # Encode the given input directly, word by word.
        bpe_prefix_tree = PrefixTree()
        for bpe_sym in vocab.labels:
            bpe_prefix_tree.add(bpe_sym)

        def _sampler():
            """Stochastic breadth-vs-depth decision, see --breadth-prob."""
            return (rnd.random_sample() <= args.breadth_prob)
        for word in args.input.split():
            if args.all:
                # Enumerate all possible BPE splits for this word.
                bpe_sym_seqs = CharSyncSearch(bpe=bpe_prefix_tree, word=word).search()
                print(('%s: %s' % (word, bpe_sym_seqs)))
            else:
                # Find just one (sampled) split.
                greedy = DepthFirstSearch(bpe=bpe_prefix_tree, word=word, sampler=_sampler).search()
                print(('%s: %s' % (word, ' '.join(greedy))))
        return
    # No --input: read lines from stdin and segment them.
    bpe = SamplingBytePairEncoder(labels=vocab.labels, breadth_prob=args.breadth_prob, rnd=rnd, unknown_label=args.unk)
    print('Reading from stdin:')
    while True:
        try:
            line = sys.stdin.readline()
            if (line == ''):
                # EOF.
                return
        except KeyboardInterrupt:
            return
        line = line.strip()
        print(' '.join(bpe.segment_sentence(line)))
|
def auto_exclude_all_new_threads(func):
    """
    Decorator: any threads which are created during the call of the wrapped function
    are added to :data:`global_exclude_thread_ids`, i.e. they get excluded
    in thread traceback dumps (see :func:`dump_all_thread_tracebacks`).

    :param T func:
    :return: func wrapped
    :rtype: T
    """
    from functools import wraps

    # Bug fix: use functools.wraps so the wrapper keeps the name/docstring/etc. of func.
    @wraps(func)
    def wrapped(*args, **kwargs):
        """Calls func, and registers all threads created during the call as excluded."""
        old_threads = set(sys._current_frames().keys())
        res = func(*args, **kwargs)
        new_threads = set(sys._current_frames().keys())
        new_threads -= old_threads
        global_exclude_thread_ids.update(new_threads)
        return res

    return wrapped
|
def dump_all_thread_tracebacks(exclude_thread_ids=None, exclude_self=False):
    """
    Prints the traceback of all currently alive threads to stdout.

    :param set[int]|None exclude_thread_ids: thread idents whose traceback is skipped
    :param bool exclude_self: whether to skip the calling thread
    """
    if exclude_thread_ids is None:
        exclude_thread_ids = set()
    from returnn.util.better_exchook import print_tb
    import threading

    if exclude_self:
        exclude_thread_ids = set(list(exclude_thread_ids) + [threading.current_thread().ident])
    if hasattr(sys, '_current_frames'):
        print('')
        threads = {t.ident: t for t in threading.enumerate()}
        for tid, stack in sorted(sys._current_frames().items()):
            # Skip threads which are not registered in the threading module.
            if tid not in threads:
                continue
            tags = []
            thread_ = threads.get(tid)
            if thread_:
                assert isinstance(thread_, threading.Thread)
                # Modernized: threading.currentThread() was deprecated and removed in Python 3.12,
                # and threading._MainThread is private; use current_thread()/main_thread() instead.
                if thread_ is threading.current_thread():
                    tags += ['current']
                if thread_ is threading.main_thread():
                    tags += ['main']
                tags += [str(thread_)]
            else:
                tags += [('unknown with id %i' % tid)]
            print('Thread %s:' % ', '.join(tags))
            if tid in global_exclude_thread_ids:
                print('(Auto-ignored traceback.)')
            elif tid in exclude_thread_ids:
                print('(Excluded thread.)')
            else:
                print_tb(stack, file=sys.stdout)
            print('')
        print('That were all threads.')
    else:
        print('Does not have sys._current_frames, cannot get thread tracebacks.')
|
def setup_warn_with_traceback():
    """
    Installs a hook for ``warnings.showwarning``,
    such that every warning also prints its stack traceback.
    """
    import warnings
    from returnn.util.better_exchook import print_tb

    def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
        """
        Replacement for warnings.showwarning (same signature),
        which additionally prints the current stack.
        """
        log = file if hasattr(file, 'write') else sys.stderr
        log.write(warnings.formatwarning(message, category, filename, lineno, line))
        print_tb(sys._getframe(), file=log)

    warnings.showwarning = warn_with_traceback
|
def init_better_exchook():
    """
    Installs our own ``sys.excepthook``, which uses :mod:`better_exchook`,
    but adds some special handling for the main thread.
    """
    from returnn.util.better_exchook import better_exchook

    def excepthook(exc_type, exc_obj, exc_tb):
        """
        Replacement for sys.excepthook.

        :param exc_type:
        :param exc_obj:
        :param exc_tb:
        """
        # noinspection PyBroadException
        try:
            # Modernized: threading.currentThread() was deprecated and removed in Python 3.12,
            # and threading._MainThread is private; use current_thread()/main_thread() instead.
            is_main_thread = threading.current_thread() is threading.main_thread()
        except Exception:
            # Can happen at a very late stage while quitting.
            if exc_type is KeyboardInterrupt:
                return
        else:
            if is_main_thread:
                if exc_type is KeyboardInterrupt and getattr(sys, 'exited', False):
                    # We are already exiting; ignore a further KeyboardInterrupt.
                    return
                sys.exited = True
        print('Unhandled exception %s in thread %s, proc %i.' % (exc_type, threading.current_thread(), os.getpid()))
        if exc_type is KeyboardInterrupt:
            return
        if threading.current_thread() is threading.main_thread():
            main_thread_id = thread.get_ident()
            # Bug fix: this was `isinstance(exc_type, Exception)` before, which is always False
            # because exc_type is a class, not an instance; `issubclass` matches the evident intent
            # (only dump all threads for non-Exception exits such as SystemExit).
            if not issubclass(exc_type, Exception):
                # Dump all other threads, for better debugging.
                dump_all_thread_tracebacks(exclude_thread_ids={main_thread_id})
        better_exchook(exc_type, exc_obj, exc_tb, file=sys.stdout)

    sys.excepthook = excepthook

    from returnn.util.basic import to_bool

    if os.environ.get('DEBUG_WARN_WITH_TRACEBACK') and to_bool(os.environ.get('DEBUG_WARN_WITH_TRACEBACK')):
        setup_warn_with_traceback()
|
def format_signum(signum):
    """
    :param int signum:
    :return: string "signum (signame)"
    :rtype: str
    """
    name = signum_to_signame.get(signum, 'unknown')
    return '%s (%s)' % (signum, name)
|
def signal_handler(signum, frame):
    """
    Prints a message on stdout and dump all thread stacks.

    :param int signum: e.g. signal.SIGUSR1
    :param frame: ignored, will dump all threads
    """
    print('Signal handler: got signal %s' % format_signum(signum))
    dump_all_thread_tracebacks()
|
def install_signal_handler_if_default(signum, exceptions_are_fatal=False):
    """
    Installs our :func:`signal_handler` for the given signal,
    but only if the default handler (SIG_DFL) is currently installed.

    :param int signum: e.g. signal.SIGUSR1
    :param bool exceptions_are_fatal: if True, will reraise any exceptions. if False, will just print a message
    :return: True iff no exception, False otherwise. not necessarily that we registered our own handler
    :rtype: bool
    """
    try:
        if signal.getsignal(signum) == signal.SIG_DFL:
            signal.signal(signum, signal_handler)
        return True
    except Exception as exc:
        if exceptions_are_fatal:
            raise
        print('Cannot install signal handler for signal %s, exception %s' % (format_signum(signum), exc))
    return False
|
def _get_native_signal_handler_lib_filename() -> str:
    """
    :return: path to our native_signal_handler lib, compiled on-the-fly if needed
        (and cached in a module-level variable). see :func:`install_native_signal_handler`
    """
    global _native_signal_handler_lib_filename
    if _native_signal_handler_lib_filename:
        return _native_signal_handler_lib_filename
    from returnn.util.basic import NativeCodeCompiler
    import textwrap
    # The C code installs handlers for SIGSEGV/SIGBUS/SIGILL/SIGABRT/SIGFPE which print
    # a native backtrace to stderr and then re-raise via the previously installed handler.
    native = NativeCodeCompiler(base_name='native_signal_handler', code_version=1, code=textwrap.dedent(' #include <stdio.h>\n #include <execinfo.h>\n #include <signal.h>\n #include <stdlib.h>\n #include <unistd.h>\n\n\n // https://github.com/ruby/ruby/blob/bbfd735b887/vm_core.h#L118\n #if defined(NSIG_MAX) /* POSIX issue 8 */\n # undef NSIG\n # define NSIG NSIG_MAX\n #elif defined(_SIG_MAXSIG) /* FreeBSD */\n # undef NSIG\n # define NSIG _SIG_MAXSIG\n #elif defined(_SIGMAX) /* QNX */\n # define NSIG (_SIGMAX + 1)\n #elif defined(NSIG) /* 99% of everything else */\n # /* take it */\n #else /* Last resort */\n # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)\n #endif\n\n\n sig_t old_signal_handler[NSIG];\n\n\n void signal_handler(int sig) {\n void *array[16 * 1024];\n size_t size;\n\n // get void*\'s for all entries on the stack\n size = backtrace(array, sizeof(array)/sizeof(array[0]));\n\n // print out all the frames to stderr\n fprintf(stderr, "Signal handler: signal %d:\\n", sig);\n backtrace_symbols_fd(array, size, STDERR_FILENO);\n\n // call previous handler\n signal(sig, old_signal_handler[sig]);\n raise(sig);\n }\n\n void install_signal_handler() {\n old_signal_handler[SIGSEGV] = signal(SIGSEGV, signal_handler);\n old_signal_handler[SIGBUS] = signal(SIGBUS, signal_handler);\n old_signal_handler[SIGILL] = signal(SIGILL, signal_handler);\n old_signal_handler[SIGABRT] = signal(SIGABRT, signal_handler);\n old_signal_handler[SIGFPE] = signal(SIGFPE, signal_handler);\n }\n '), is_cpp=False)
    _native_signal_handler_lib_filename = native.get_lib_filename()
    return _native_signal_handler_lib_filename
|
def install_native_signal_handler(*, reraise_exceptions: bool = False):
    """
    Installs our own custom C signal handler (via ctypes),
    which prints a native backtrace on SIGSEGV and related signals
    and then chains to the previous handler.
    See :func:`_get_native_signal_handler_lib_filename`.

    :param reraise_exceptions: if True, reraise any exception which occurs during installation,
        otherwise only print it
    """
    # noinspection PyBroadException
    try:
        import ctypes

        lib = ctypes.CDLL(_get_native_signal_handler_lib_filename())
        # Bug fix: this was `return_type` before, which is not a ctypes attribute
        # and thus had no effect; the correct attribute is `restype` (the C function returns void).
        lib.install_signal_handler.restype = None
        lib.install_signal_handler()
        print('Installed native_signal_handler.so.')
    except Exception as exc:
        print('installNativeSignalHandler exception: %s' % exc)
        if reraise_exceptions:
            raise
|
def install_lib_sig_segfault():
    """
    Installs libSegFault (common on Unix/Linux),
    which prints a backtrace on segfaults and related signals.
    Any failure is only printed, not raised.
    """
    # noinspection PyBroadException
    try:
        os.environ.setdefault('SEGFAULT_SIGNALS', 'all')
        import ctypes
        import ctypes.util

        lib_filename = ctypes.util.find_library('SegFault')
        assert lib_filename, 'libSegFault not found'
        ctypes.CDLL(lib_filename)
        print('Installed libSegFault.so.')
    except Exception as exc:
        print('installLibSigSegfault exception: %s' % exc)
|
def init_faulthandler(sigusr1_chain=False):
    """
    Maybe installs signal handlers, SIGUSR1 and SIGUSR2 and others.
    If no signals handlers are installed yet for SIGUSR1/2, we try to install our own Python handler.
    This also tries to install the handler from the faulthandler module,
    esp for SIGSEGV and others.

    :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called.
    """
    install_native_signal_handler()
    if sys.platform != 'win32':
        # If we installed our own handler for SIGUSR1 (i.e. the default was active before),
        # also let faulthandler chain to it below.
        if install_signal_handler_if_default(signal.SIGUSR1):
            sigusr1_chain = True
        install_signal_handler_if_default(signal.SIGUSR2)
    try:
        import faulthandler
    except ImportError as exc:
        print('faulthandler import error. %s' % exc)
    else:
        if not faulthandler.is_enabled():
            faulthandler.enable()
            if sys.platform != 'win32':
                faulthandler.register(signal.SIGUSR1, all_threads=True, chain=sigusr1_chain)
|
@auto_exclude_all_new_threads
def init_ipython_kernel():
    """
    Runs an IPython ZMQ kernel in a background daemon thread, so that you can connect to it
    from the outside (using the connection file which is written to the current directory
    and removed again at exit).
    Any import or setup errors are only printed, not raised.
    NOTE(review): this uses the old pre-4.0 IPython module layout (``IPython.kernel.*``) —
    presumably only works with old IPython versions.
    """
    # noinspection PyBroadException
    try:
        import IPython.kernel.zmq.ipkernel
        from IPython.kernel.zmq.ipkernel import Kernel
        from IPython.kernel.zmq.heartbeat import Heartbeat
        from IPython.kernel.zmq.session import Session
        from IPython.kernel import write_connection_file
        import zmq
        from zmq.eventloop import ioloop
        from zmq.eventloop.zmqstream import ZMQStream
        # Monkey-patch the module's `signal` function to a no-op,
        # so that the kernel does not try to install any signal handlers
        # (we will run it in a background thread, not the main thread).
        IPython.kernel.zmq.ipkernel.signal = (lambda sig, f: None)
    except ImportError as e:
        print(('IPython import error, cannot start IPython kernel. %s' % e))
        return
    import atexit
    import socket
    import logging
    import threading
    assert isinstance(threading.currentThread(), threading._MainThread)
    try:
        # Setup: sockets for shell/iopub/control plus a heartbeat, all on random ports,
        # then write the connection file so that clients can find us.
        ip = socket.gethostbyname(socket.gethostname())
        connection_file = ('ipython-kernel-%s-%s.json' % (ip, os.getpid()))

        def cleanup_connection_file():
            """
            Removes the connection file at interpreter exit. Best-effort.
            """
            try:
                os.remove(connection_file)
            except (IOError, OSError):
                pass
        atexit.register(cleanup_connection_file)
        logger = logging.Logger('IPython')
        logger.addHandler(logging.NullHandler())
        session = Session(username='kernel')
        context = zmq.Context.instance()
        transport = 'tcp'
        addr = ('%s://%s' % (transport, ip))
        shell_socket = context.socket(zmq.ROUTER)
        shell_port = shell_socket.bind_to_random_port(addr)
        iopub_socket = context.socket(zmq.PUB)
        iopub_port = iopub_socket.bind_to_random_port(addr)
        control_socket = context.socket(zmq.ROUTER)
        control_port = control_socket.bind_to_random_port(addr)
        hb_ctx = zmq.Context()
        heartbeat = Heartbeat(hb_ctx, (transport, ip, 0))
        hb_port = heartbeat.port
        heartbeat.start()
        shell_stream = ZMQStream(shell_socket)
        control_stream = ZMQStream(control_socket)
        kernel = Kernel(session=session, shell_streams=[shell_stream, control_stream], iopub_socket=iopub_socket, log=logger)
        write_connection_file(connection_file, shell_port=shell_port, iopub_port=iopub_port, control_port=control_port, hb_port=hb_port, ip=ip)
    except Exception as e:
        print(('Exception while initializing IPython ZMQ kernel. %s' % e))
        return

    def ipython_thread():
        """
        Background thread target: runs the IPython kernel and its IO loop.
        """
        kernel.start()
        try:
            ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            pass
    thread_ = threading.Thread(target=ipython_thread, name='IPython kernel')
    thread_.daemon = True
    thread_.start()
|
def init_cuda_not_in_main_proc_check():
    """
    Installs some hook to Theano which checks that CUDA is only used in the main proc.
    """
    import theano.sandbox.cuda as cuda

    if cuda.use.device_number is not None:
        print('CUDA already initialized in proc %i' % os.getpid())
        return
    use_original = cuda.use

    def use_wrapped(device, **kwargs):
        """
        Wrapper for cuda.use which reports the proc in which CUDA gets used.

        :param device:
        :param kwargs:
        """
        print('CUDA.use %s in proc %i' % (device, os.getpid()))
        use_original(device=device, **kwargs)

    cuda.use = use_wrapped
    cuda.use.device_number = None
|
def debug_shell(user_ns: Optional[Dict[str, Any]] = None, user_global_ns: Optional[Dict[str, Any]] = None, exit_afterwards: bool = True):
    """
    Provides some interactive Python shell.
    Uses IPython if possible.
    Wraps to ``better_exchook.debug_shell``.

    :param user_ns: locals for the shell
    :param user_global_ns: globals for the shell (extended by our debug helpers)
    :param exit_afterwards: will do sys.exit(1) at the end
    """
    print('Debug shell:')
    from returnn.util.basic import ObjAsDict
    from . import debug_helpers

    globals_ns = dict(ObjAsDict(debug_helpers).items())
    if user_global_ns:
        globals_ns.update(user_global_ns)
    globals_ns['debug'] = debug_helpers
    print('Available debug functions/utils (via DebugHelpers):')
    for key, value in sorted(vars(debug_helpers).items()):
        if key[:1] == '_':
            continue
        print('  %s (%s)' % (key, type(value)))
    print("Also DebugHelpers available as 'debug'.")
    user_ns = user_ns or {}
    if user_ns:
        print('Locals:')
        for key, value in sorted(user_ns.items()):
            print('  %s (%s)' % (key, type(value)))
    from returnn.util.better_exchook import debug_shell

    debug_shell(user_ns, globals_ns)
    if exit_afterwards:
        print('Debug shell exit. Exit now.')
        sys.exit(1)
|
def literal_eval(s):
    """
    This can be used as an alternative to ``ast.literal_eval``.
    In contrast to ``ast.literal_eval``, it also accepts bytes,
    and it should be ~5x faster.

    :param str|bytes s: Python literal code
    :return: any object
    """
    # Translate the literal directly into a pickle byte stream, then unpickle it.
    return pickle.loads(py_to_pickle(s))
|
def py_to_pickle(s):
    """
    Converts Python literal code (str or bytes) into a raw pickle byte stream,
    using our native helper lib (see :func:`_get_native_lib_filename`).

    :param str|bytes s:
    :rtype: bytes
    """
    global _lib
    if isinstance(s, bytes):
        raw_in = s
    else:
        assert isinstance(s, str)
        raw_in = s.encode('utf8')
    in_buf = ctypes.create_string_buffer(raw_in)
    in_size = len(raw_in)
    # Generous upper bound on the pickle output size.
    out_size = in_size + 1000
    out_buf = ctypes.create_string_buffer(out_size)
    if not _lib:
        # Lazily load the native lib and declare the function signature once.
        _lib = ctypes.CDLL(_get_native_lib_filename())
        _lib.py_to_pickle.argtypes = (ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_size_t)
        _lib.py_to_pickle.restype = ctypes.c_int
    res = _lib.py_to_pickle(in_buf, in_size, out_buf, out_size)
    assert res == 0, 'there was some error'
    return out_buf.raw
|
def _get_native_lib_filename():
    """
    :return: path to our native py-to-pickle lib, compiled on-the-fly if needed
        (and cached in a module-level variable). see :func:`py_to_pickle`
    :rtype: str
    """
    # (The previous docstring wrongly described the patch_atfork lib - copy-paste leftover.)
    global _native_lib_filename
    if _native_lib_filename:
        return _native_lib_filename
    # Bug fix: read the C++ source via a context manager; the old code leaked the file handle.
    with open(_native_cpp_filename) as f:
        native_code = f.read()
    native = NativeCodeCompiler(
        base_name='pytopickle', code_version=1, code=native_code, is_cpp=True, c_macro_defines={'LIB': 1})
    _native_lib_filename = native.get_lib_filename()
    return _native_lib_filename
|
def next_power_of_two(n: int) -> int:
    """Returns the smallest power of two which is >= n."""
    # Equivalent to 2 ** (n - 1).bit_length().
    return 1 << (n - 1).bit_length()
|
class NativeCodeCompiler(object):
'\n Helper class to compile native C/C++ code on-the-fly.\n '
CacheDirName = 'returnn_native'
CollectedCompilers = None
def __init__(self, base_name, code_version, code, is_cpp=True, c_macro_defines=None, ld_flags=None, include_paths=(), include_deps=None, static_version_name=None, should_cleanup_old_all=True, should_cleanup_old_mydir=False, use_cxx11_abi=False, log_stream=None, verbose=False):
'\n :param str base_name: base name for the module, e.g. "zero_out"\n :param int|tuple[int] code_version: check for the cache whether to reuse\n :param str code: the source code itself\n :param bool is_cpp: if False, C is assumed\n :param dict[str,str|int|None]|None c_macro_defines: e.g. {"TENSORFLOW": 1}\n :param list[str]|None ld_flags: e.g. ["-lblas"]\n :param list[str]|tuple[str] include_paths:\n :param list[str]|None include_deps: if provided and an existing lib file,\n we will check if any dependency is newer\n and we need to recompile. we could also do it automatically via -MD but that seems overkill and too slow.\n :param str|None static_version_name: normally, we use .../base_name/hash as the dir\n but this would use .../base_name/static_version_name.\n :param bool should_cleanup_old_all: whether we should look in the cache dir\n and check all ops if we can delete some old ones which are older than some limit\n (self._cleanup_time_limit_days)\n :param bool should_cleanup_old_mydir: whether we should delete our op dir before we compile there.\n :param typing.TextIO|None log_stream: file stream for print statements\n :param bool verbose: be slightly more verbose\n '
if (self.CollectedCompilers is not None):
self.CollectedCompilers.append(self)
self.verbose = verbose
self.cache_dir = ('%s/%s' % (util.get_cache_dir(), self.CacheDirName))
self._include_paths = list(include_paths)
self.base_name = base_name
self.code_version = code_version
self.code = code
self.is_cpp = is_cpp
self.c_macro_defines = {k: v for (k, v) in (c_macro_defines or {}).items() if (v is not None)}
self.ld_flags = (ld_flags or [])
self.include_deps = include_deps
self.static_version_name = static_version_name
self._code_hash = self._make_code_hash()
self._info_dict = self._make_info_dict()
self._hash = self._make_hash()
self._ctypes_lib = None
if should_cleanup_old_all:
self._cleanup_old()
self._should_cleanup_old_mydir = should_cleanup_old_mydir
self.use_cxx11_abi = use_cxx11_abi
self._log_stream = log_stream
if self.verbose:
print(('%s: %r' % (self.__class__.__name__, self)), file=log_stream)
def __repr__(self):
return ('<%s %r in %r>' % (self.__class__.__name__, self.base_name, self._mod_path))
@property
def _mod_path(self):
return ('%s/%s/%s' % (self.cache_dir, self.base_name, (self.static_version_name or self._hash[:10])))
@property
def _info_filename(self):
return ('%s/info.py' % (self._mod_path,))
@property
def _so_filename(self):
return ('%s/%s.so' % (self._mod_path, self.base_name))
@property
def _c_filename(self):
if self.is_cpp:
return ('%s/%s.cc' % (self._mod_path, self.base_name))
return ('%s/%s.c' % (self._mod_path, self.base_name))
_cleanup_time_limit_days = 60
def _cleanup_old(self):
    """
    Scan the cache dir of this op (all versions except our own dir) and delete
    entries which are corrupt (missing info.py or the .so file) or whose .so
    is older than :attr:`_cleanup_time_limit_days`. Dirs which are currently
    locked (another process compiling) are skipped.
    """
    mod_path = self._mod_path
    base_mod_path = os.path.dirname(mod_path)
    my_mod_path_name = os.path.basename(mod_path)
    if (not os.path.exists(base_mod_path)):
        return
    import time
    cleanup_time_limit_secs = (((self._cleanup_time_limit_days * 24) * 60) * 60)
    for p in os.listdir(base_mod_path):
        if (p == my_mod_path_name):
            continue  # never touch our own dir here
        full_dir_path = ('%s/%s' % (base_mod_path, p))
        if (not os.path.isdir(full_dir_path)):
            continue  # ignore stray files
        lock = util.LockFile(full_dir_path)
        if lock.is_locked():
            # Some other process is currently working in there.
            continue
        lock.maybe_remove_old_lockfile()
        info_path = ('%s/info.py' % full_dir_path)
        if (not os.path.exists(info_path)):
            self._cleanup_old_path(full_dir_path, reason='corrupt dir, missing info.py')
            continue
        so_path = ('%s/%s.so' % (full_dir_path, self.base_name))
        if (not os.path.exists(so_path)):
            self._cleanup_old_path(full_dir_path, reason='corrupt dir, missing so')
            continue
        # Age is measured via the .so mtime (touched on reuse in _maybe_compile).
        dt = (time.time() - os.path.getmtime(so_path))
        if (dt > cleanup_time_limit_secs):
            self._cleanup_old_path(full_dir_path, reason=('%s old' % util.hms(dt)))
def _cleanup_old_path(self, p, reason):
    """
    Delete the given cached op dir, logging the reason.
    Deletion errors are printed and ignored (e.g. a race with another
    process removing the same dir).

    :param str p: directory path
    :param str reason: for the log output
    """
    print(('%s delete old, %s: %s' % (self.__class__.__name__, reason, p)))
    assert os.path.exists(p)
    import shutil
    try:
        shutil.rmtree(p)
    except OSError as exc:
        # Not critical; maybe another process removed it concurrently.
        print(('%s delete exception (%s). Will ignore and try to continue anyway.' % (self.__class__.__name__, exc)))
def _load_info(self):
'\n :rtype: dict[str]|None\n '
filename = self._info_filename
if (not os.path.exists(filename)):
return None
s = open(filename).read()
res = eval(s)
assert isinstance(res, dict)
return res
_relevant_info_keys = ('code_version', 'code_hash', 'c_macro_defines', 'ld_flags', 'compiler_bin', 'platform')
def _make_info_dict(self):
'\n :rtype: dict[str]\n '
import platform
return {'base_name': self.base_name, 'include_paths': self._include_paths, 'code_version': self.code_version, 'code_hash': self._code_hash, 'c_macro_defines': self.c_macro_defines, 'ld_flags': self.ld_flags, 'compiler_bin': self._get_compiler_bin(), 'platform': platform.platform()}
def _make_code_hash(self):
import hashlib
h = hashlib.md5()
h.update(self.code.encode('utf8'))
return h.hexdigest()
def _make_hash(self):
import hashlib
h = hashlib.md5()
h.update('{'.encode('utf8'))
for key in self._relevant_info_keys:
h.update(('%s:{%s}' % (key, self._info_dict[key])).encode('utf8'))
h.update('}'.encode('utf8'))
return h.hexdigest()
def _save_info(self):
    """Write the current info dict to the info.py file in the module dir."""
    with open(self._info_filename, "w") as f:
        f.write("%s\n" % util.better_repr(self._info_dict))
def _need_recompile(self):
    """
    Whether the cached .so is missing or outdated, judged by the
    include-deps mtimes and the relevant entries of the stored info dict.

    :rtype: bool
    """
    if (not os.path.exists(self._so_filename)):
        return True
    if self.include_deps:
        # Recompile if any declared dependency is newer than the .so.
        so_mtime = os.path.getmtime(self._so_filename)
        for fn in self.include_deps:
            if (os.path.getmtime(fn) > so_mtime):
                return True
    old_info = self._load_info()
    new_info = self._make_info_dict()
    if (not old_info):
        return True
    # Compare only the relevant keys (see _relevant_info_keys).
    for key in self._relevant_info_keys:
        if (key not in old_info):
            return True
        if (old_info[key] != new_info[key]):
            return True
    return False
def _maybe_compile(self):
    """
    On successful return, self._so_filename should exist and be up-to-date.
    Compilation is guarded by a lock file, so that multiple processes do not
    compile the same op at the same time.
    """
    if (not self._need_recompile()):
        if self.verbose:
            print(('%s: No need to recompile: %s' % (self.__class__.__name__, self._so_filename)))
        # Touch the info file so the cleanup logic sees this dir was used recently.
        os.utime(self._info_filename, None)
        return
    lock = util.LockFile(self._mod_path)
    if (not self._need_recompile()):
        # Check again; another process might have compiled it in the meantime.
        if self.verbose:
            print(('%s: No need to recompile after we waited: %s' % (self.__class__.__name__, self._so_filename)))
        os.utime(self._info_filename, None)
        return
    if (self._should_cleanup_old_mydir and (not lock.is_locked())):
        if os.path.exists(self._mod_path):
            self._cleanup_old_path(self._mod_path, reason='need recompile')
    with lock:
        self._maybe_compile_inner()
def _get_compiler_bin(self):
'\n :rtype: str\n '
if self.is_cpp:
return 'g++'
return 'gcc'
def _transform_compiler_opts(self, opts):
'\n :param list[str] opts:\n :rtype: list[str]\n '
return opts
def _extra_common_opts(self):
'\n :rtype: list[str]\n '
if self.is_cpp:
return ['-std=c++11']
return []
@classmethod
def _transform_ld_flag(cls, opt):
'\n :param str opt:\n :rtype: str\n '
if (sys.platform == 'darwin'):
if (opt.startswith('-l:lib') and opt.endswith('.dylib')):
return ('-l%s' % opt[len('-l:lib'):(- len('.dylib'))])
return opt
def _maybe_compile_inner(self):
    """
    Write the source file, run the compiler, check the result, store the
    compile log and the info dict. Assumes the mod dir exists and that we
    hold the lock (see :func:`_maybe_compile`).

    :raises subprocess.CalledProcessError: if the compiler fails
    """
    assert os.path.exists(self._mod_path)
    with open(self._c_filename, 'w') as f:
        f.write(self.code)
    common_opts = ['-shared', '-O2']
    common_opts += self._extra_common_opts()
    if (sys.platform == 'darwin'):
        # Resolve undefined symbols at load time (e.g. from the host process).
        common_opts += ['-undefined', 'dynamic_lookup']
    for include_path in self._include_paths:
        common_opts += ['-I', include_path]
    compiler_opts = ['-fPIC', '-v']
    common_opts += self._transform_compiler_opts(compiler_opts)
    common_opts += [('-D_GLIBCXX_USE_CXX11_ABI=%i' % (1 if self.use_cxx11_abi else 0))]
    # Sorted for a deterministic command line (relevant for hashing/logging).
    common_opts += [('-D%s=%s' % item) for item in sorted(self.c_macro_defines.items())]
    common_opts += ['-g']
    opts = (common_opts + [self._c_filename, '-o', self._so_filename])
    opts += list(map(self._transform_ld_flag, self.ld_flags))
    cmd_bin = self._get_compiler_bin()
    cmd_args = ([cmd_bin] + opts)
    from subprocess import Popen, PIPE, STDOUT, CalledProcessError
    print(('%s call: %s' % (self.__class__.__name__, ' '.join(cmd_args))), file=self._log_stream)
    proc = Popen(cmd_args, cwd=self._mod_path, stdout=PIPE, stderr=STDOUT)
    (stdout, stderr) = proc.communicate()
    assert (stderr is None)  # stderr was merged into stdout via STDOUT above
    if (proc.returncode != 0):
        print(('%s: %s failed.' % (self.__class__.__name__, cmd_bin)))
        print('Original stdout/stderr:')
        print(stdout.decode('utf8'))
        print()
        # Hints for some known nvcc-specific failure modes:
        if (cmd_bin.endswith('/nvcc') and (b'error: constexpr function return is non-constant' in stdout)):
            print('This might be the error: https://github.com/tensorflow/tensorflow/issues/22766')
            print()
        if (cmd_bin.endswith('/nvcc') and (b'gcc versions later than' in stdout)):
            print('Your GCC version might be too new. This is a problem with some nvcc versions.')
            print()
        raise CalledProcessError(returncode=proc.returncode, cmd=cmd_args)
    assert os.path.exists(self._so_filename)
    with open(('%s/compile.log' % self._mod_path), 'wb') as f:
        if self.verbose:
            print(('%s: write compile log to: %s' % (self.__class__.__name__, f.name)))
        f.write(('+ %s\n' % ' '.join(cmd_args)).encode('utf8'))
        f.write(stdout)
    self._save_info()
    assert (not self._need_recompile())
def load_lib_ctypes(self):
    """
    Compile the lib if needed, then load it via ctypes. Cached after the
    first call.

    :rtype: ctypes.CDLL
    """
    if self._ctypes_lib:
        return self._ctypes_lib
    self._maybe_compile()
    import ctypes
    self._ctypes_lib = ctypes.cdll.LoadLibrary(self._so_filename)
    return self._ctypes_lib
def get_lib_filename(self):
    """
    Compile the lib if needed and return the path of the resulting .so file.

    :rtype: str
    """
    self._maybe_compile()
    return self._so_filename
|
def pprint(obj: Any, *, file=None, prefix='', postfix='', line_prefix='', line_postfix='\n') -> None:
    """
    Pretty-print a Python object.
    Handles list/tuple/set/dict/numpy.ndarray specially (recursively),
    everything else is printed via repr.

    :param obj: object to print
    :param file: stream to write to; defaults to sys.stdout
    :param prefix: printed directly before the first token of obj (e.g. "key: ")
    :param postfix: printed directly after the last token of obj (e.g. ",")
    :param line_prefix: printed at the beginning of every line (indentation)
    :param line_postfix: printed at the end of every line (usually a newline)
    """
    if (file is None):
        file = sys.stdout
    if (('\n' in line_postfix) and (_type_simplicity_score(obj) <= _type_simplicity_limit)):
        # Simple enough: collapse to a single line by moving the line
        # prefix/postfix into the object prefix/postfix.
        prefix = f'{line_prefix}{prefix}'
        line_prefix = ''
        postfix = (postfix + line_postfix)
        line_postfix = ''

    def _sub_pprint(obj_: Any, prefix_='', postfix_='', inc_indent=True):
        # Recursive call for a nested element, optionally with increased indent.
        multi_line = ('\n' in line_postfix)
        if ((not multi_line) and postfix_.endswith(',')):
            postfix_ += ' '
        pprint(obj_, file=file, prefix=prefix_, postfix=postfix_, line_prefix=((line_prefix + (' ' * inc_indent)) if multi_line else ''), line_postfix=line_postfix)

    def _print(s: str, is_end: bool=False):
        # Write one line fragment; consumes the pending prefix (via nonlocal).
        nonlocal prefix
        file.write(line_prefix)
        file.write(prefix)
        file.write(s)
        if is_end:
            file.write(postfix)
        file.write(line_postfix)
        if ('\n' in line_postfix):
            file.flush()
        prefix = ''

    def _print_list():
        # Print all elements of obj (assumed sized iterable), comma-separated.
        for (i_, v_) in enumerate(obj):
            _sub_pprint(v_, postfix_=(',' if (i_ < (len(obj) - 1)) else ''))

    if isinstance(obj, list):
        if (len(obj) == 0):
            _print('[]', is_end=True)
            return
        _print('[')
        _print_list()
        _print(']', is_end=True)
        return
    if isinstance(obj, tuple):
        if (len(obj) == 0):
            _print('()', is_end=True)
            return
        if (len(obj) == 1):
            # Single-element tuple: keep it on one line incl. the trailing comma.
            _sub_pprint(obj[0], prefix_=f'{prefix}(', postfix_=f',){postfix}', inc_indent=False)
            return
        _print('(')
        _print_list()
        _print(')', is_end=True)
        return
    if isinstance(obj, set):
        if (len(obj) == 0):
            _print('set()', is_end=True)
            return
        _print('{')
        _print_list()
        _print('}', is_end=True)
        return
    if isinstance(obj, dict):
        if (len(obj) == 0):
            _print('{}', is_end=True)
            return
        _print('{')
        for (i, (k, v)) in enumerate(obj.items()):
            _sub_pprint(v, prefix_=f'{k!r}: ', postfix_=(',' if (i < (len(obj) - 1)) else ''))
        _print('}', is_end=True)
        return
    if isinstance(obj, numpy.ndarray):
        # Printed in a form which would reconstruct the array.
        _sub_pprint(obj.tolist(), prefix_=f'{prefix}numpy.array(', postfix_=f', dtype=numpy.{obj.dtype}){postfix}', inc_indent=False)
        return
    # Fallback for everything else.
    _print(repr(obj), is_end=True)
|
def pformat(obj: Any) -> str:
    """
    Pretty-format a Python object, like :func:`pprint`,
    but return the result as a string.
    """
    import io

    stream = io.StringIO()
    pprint(obj, file=stream)
    return stream.getvalue()
|
def _type_simplicity_score(obj: Any, _offset=0.0) -> float:
    """
    Very rough but cheap estimate of ``len(repr(obj))``,
    used by :func:`pprint` to decide whether an object fits on one line.

    :param Any obj:
    :param float _offset: accumulated score so far (internal)
    :return: estimated repr length plus offset
    """
    _spacing = 2.0
    if isinstance(obj, bool):
        return 4.0 + _offset  # "True"/"False"
    if isinstance(obj, (int, numpy.integer)):
        if obj == 0:
            return 1.0 + _offset
        return (1.0 + numpy.log10(abs(obj))) + _offset  # approx digit count
    if isinstance(obj, str):
        return (2.0 + len(obj)) + _offset  # quotes + content
    if isinstance(obj, (float, complex, numpy.number)):
        return len(repr(obj)) + _offset
    if isinstance(obj, (tuple, list, set)):
        for element in obj:
            _offset = _type_simplicity_score(element, _offset=_offset + _spacing)
            if _offset > _type_simplicity_limit:
                break  # already too big, no need to look further
        return _offset
    if isinstance(obj, dict):
        for value in obj.values():
            # Assume ~10 chars per key (unknown), plus spacing.
            _offset = _type_simplicity_score(value, _offset=(_offset + 10.0) + _spacing)
            if _offset > _type_simplicity_limit:
                break
        return _offset
    if isinstance(obj, numpy.ndarray):
        _offset += 10.0  # e.g. "numpy.array(...)"
        if (obj.size * 2.0) + _offset > _type_simplicity_limit:
            return (obj.size * 2.0) + _offset
        if str(obj.dtype).startswith('int'):
            # Estimate via the widest element.
            per_element = _type_simplicity_score(numpy.max(numpy.abs(obj))) + _spacing
            return (obj.size * per_element) + _offset
        per_element = max([_type_simplicity_score(x) for x in obj.flatten()]) + _spacing
        return (obj.size * per_element) + _offset
    # Unknown types always count as "not simple".
    return (_type_simplicity_limit + 1.0) + _offset
|
class PyExtModCompiler(NativeCodeCompiler):
    """
    Python extension module compiler:
    compiles C/C++ code into a CPython extension module and can import it.
    """

    CacheDirName = "returnn_py_ext_mod_cache"

    def __init__(self, include_paths=(), **kwargs):
        """
        :param list[str]|tuple[str] include_paths: in addition to the Python include dir
        """
        py_compile_vars = sysconfig.get_config_vars()
        # The CPython headers (Python.h) are required to build an extension module.
        include_paths = list(include_paths) + [py_compile_vars["INCLUDEPY"]]
        super().__init__(include_paths=include_paths, **kwargs)
        self._py_compile_vars = py_compile_vars
        self._py_mod = None  # lazily set by load_py_module

    # The Python version matters for the ABI of the extension module.
    _relevant_info_keys = NativeCodeCompiler._relevant_info_keys + ("py_version",)

    def _extra_common_opts(self):
        """
        :rtype: list[str]
        """
        base_opts = super()._extra_common_opts()
        cflags = self._py_compile_vars["CFLAGS"]
        return base_opts + (cflags.split() if cflags else [])

    def _make_info_dict(self):
        """
        :rtype: dict[str]
        """
        info = super()._make_info_dict()
        info.update({"py_version": sys.version_info[:2]})
        return info

    def load_py_module(self):
        """
        Compile if needed, then import the resulting extension module.
        Cached after the first call.

        :return: Python extension module
        """
        from importlib.util import spec_from_loader, module_from_spec
        from importlib.machinery import ExtensionFileLoader

        if self._py_mod:
            return self._py_mod
        self._maybe_compile()
        mod_name = self.base_name
        loader = ExtensionFileLoader(mod_name, self._so_filename)
        spec = spec_from_loader(mod_name, loader)
        mod = module_from_spec(spec)
        spec.loader.exec_module(mod)
        self._py_mod = mod
        return mod
|
@dataclass
class ResultWithReason(Generic[T]):
    """
    This is a wrapper class for a result value, which can also have a reason.
    """

    # The wrapped result value.
    result: T
    # Optional human-readable explanation for the result.
    reason: Optional[str] = None
|
class SharedMem():
    """
    Wrapper around SysV shared memory (shmget/shmat/shmdt/shmctl via libc/ctypes).
    Instances can be pickled (state is just (size, shmid)), so another process
    can attach to the same segment. Not available on win32.
    """

    class ShmException(Exception):
        """Base class for any shared-memory related error."""
        pass

    class CCallException(ShmException):
        """Raised when a libc call (shmget/shmat/...) fails."""
        pass

    if (sys.platform != 'win32'):
        import ctypes
        import ctypes.util

        libc_so = ctypes.util.find_library('c')
        libc = ctypes.CDLL(libc_so, use_errno=True)
        shm_key_t = ctypes.c_int
        IPC_PRIVATE = 0
        IPC_RMID = 0

        # int shmget(key_t key, size_t size, int shmflg);
        shmget = libc.shmget
        shmget.restype = ctypes.c_int
        shmget.argtypes = (shm_key_t, ctypes.c_size_t, ctypes.c_int)
        # void* shmat(int shmid, const void* shmaddr, int shmflg);
        shmat = libc.shmat
        shmat.restype = ctypes.c_void_p
        shmat.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_int)
        # int shmdt(const void* shmaddr);
        shmdt = libc.shmdt
        shmdt.restype = ctypes.c_int
        shmdt.argtypes = (ctypes.c_void_p,)
        # int shmctl(int shmid, int cmd, struct shmid_ds* buf);
        shmctl = libc.shmctl
        shmctl.restype = ctypes.c_int
        shmctl.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_void_p)
        # void* memcpy(void* dest, const void* src, size_t n);
        memcpy = libc.memcpy
        memcpy.restype = ctypes.c_void_p
        memcpy.argtypes = (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)

    @classmethod
    def check_ccall_error(cls, check, f):
        """
        :param bool check: result of the success check; raises if falsy
        :param str f: name of the C function, used in the error message
        :raises SharedMem.CCallException:
        """
        import ctypes
        if (not check):
            errno = ctypes.get_errno()
            errstr = os.strerror(errno)
            raise cls.CCallException(('SharedMem: %s failed with error %i (%s)' % (f, errno, errstr)))

    @classmethod
    def is_shmget_functioning(cls):
        """
        :return: whether we can allocate (and immediately free) a 4MB test segment
        :rtype: bool
        """
        shmid = cls.shmget(cls.IPC_PRIVATE, ((4 * 1024) * 1024), 384)
        if (shmid <= 0):
            return False
        cls.shmctl(shmid, cls.IPC_RMID, 0)
        return True

    def __init__(self, size, shmid=None):
        """
        :param int size: segment size in bytes
        :param int|None shmid: attach to an existing segment if given,
            otherwise create a new one (then we are the "creator")
        """
        self.size = size
        self.shmid = None
        self.ptr = None
        if (shmid is None):
            self.is_creator = True
            # 384 == 0o600, i.e. read/write for the owner only.
            self.shmid = self.shmget(self.IPC_PRIVATE, self.size, 384)
            self.check_ccall_error((self.shmid > 0), 'shmget')
            print(('SharedMem[pid %i]: New shmid: %i (size %i)' % (os.getpid(), self.shmid, self.size)))
            import atexit
            atexit.register(self.remove)
        else:
            self.is_creator = False
            self.shmid = shmid
            assert (self.shmid > 0)
        self.ptr = self.shmat(self.shmid, 0, 0)
        # shmat returns (void*)-1 on error.
        self.check_ccall_error((self.ptr != self.ctypes.c_void_p((- 1)).value), 'shmat')
        self.check_ccall_error((self.ptr > 0), 'shmat')

    def remove(self):
        """Detach the mapping; if we are the creator, also mark the segment for removal."""
        if self.ptr:
            self.shmdt(self.ptr)
            self.ptr = None
        if (self.shmid and (self.shmid > 0)):
            if self.is_creator:
                print(('SharedMem[pid %i]: Removing shmid %i (size %i)' % (os.getpid(), self.shmid, self.size)))
                self.shmctl(self.shmid, self.IPC_RMID, 0)
            self.shmid = None

    def __del__(self):
        self.remove()

    def __getstate__(self):
        # Only (size, shmid) are pickled; the receiving process re-attaches via shmat.
        return {'size': self.size, 'shmid': self.shmid}

    def __setstate__(self, state):
        self.__init__(**state)

    def __repr__(self):
        return ('<SharedMem shmid=%r size=%r is_creator=%r>' % (self.shmid, self.size, self.is_creator))
|
def next_power_of_two(n):
    """
    :param int n:
    :return: smallest power of two >= n (for n >= 1)
    :rtype: int
    """
    return 1 << int(n - 1).bit_length()
|
class SharedNumpyArray():
    """
    This class provides a way to create Numpy arrays in shared memory.
    It adds some logic to mark whether some shared memory segment can be reused
    - that is when the client marks it as unused.

    Note that there are a few similar Python modules:
    https://pypi.python.org/pypi/SharedArray
    https://parad0x.org/git/python/shared-array/about
    https://bitbucket.org/cleemesser/numpy-sharedmem/src
    https://stackoverflow.com/questions/5033799/how-do-i-pass-large-numpy-arrays
    https://stackoverflow.com/questions/7894791/use-numpy-array-in-shared-memory
    """

    # Class-level server state, protected by ServerLock.
    ServerLock = Lock()
    ServerInstances = set()
    ServerArrayId = 0

    class TooMuchInstances(SharedMem.ShmException):
        """Raised when we would exceed max_server_instances."""
        pass

    # Reserved bytes at the start of the segment for metadata
    # (sanity-check flag, array id, in-use flag; see _get_*_ref below).
    ExtraSpaceBytes = 4096
    is_server = False
    mem = None
    (shape, strides, typestr) = (None, None, None)

    @staticmethod
    def numpy_strides_for_fortran(shape, typestr):
        """
        :param tuple[int] shape:
        :param str typestr: numpy dtype string
        :return: strides for a Fortran-contiguous (column-major) layout
        :rtype: tuple[int]
        """
        itemsize = numpy.dtype(typestr).itemsize
        strides = [itemsize]
        for s in shape:
            strides += [(strides[(- 1)] * s)]
        strides = strides[:(- 1)]
        return tuple(strides)

    @staticmethod
    def numpy_strides_for_c_contiguous(shape, typestr):
        """
        :param tuple[int] shape:
        :param str typestr: numpy dtype string
        :return: strides for a C-contiguous (row-major) layout
        :rtype: tuple[int]
        """
        itemsize = numpy.dtype(typestr).itemsize
        strides = [(numpy.prod(shape[(i + 1):], dtype='int') * itemsize) for i in range(len(shape))]
        return tuple(strides)

    @classmethod
    def needed_mem_size(cls, shape, typestr):
        """
        :return: bytes needed for the array data plus the metadata header
        :rtype: int
        """
        itemsize = numpy.dtype(typestr).itemsize
        mem_size = (cls.ExtraSpaceBytes + (itemsize * numpy.prod(shape)))
        return mem_size

    @classmethod
    def as_shared(cls, array):
        """
        :param numpy.ndarray array:
        :return: the existing shared wrapper if the array already lives
            in shared memory, otherwise a shared copy
        :rtype: SharedNumpyArray
        """
        assert isinstance(array, numpy.ndarray)
        if isinstance(array.base, SharedNumpyArray):
            assert array.base.is_in_use()
            return array.base
        return cls.create_copy(array)

    @classmethod
    def create_copy(cls, array):
        """
        :param numpy.ndarray array:
        :return: new shared instance with the content copied over
        :rtype: SharedNumpyArray
        """
        assert isinstance(array, numpy.ndarray)
        array_intf = array.__array_interface__
        shape = array_intf['shape']
        strides = array_intf['strides']
        typestr = array_intf['typestr']
        if (array.flags.c_contiguous or array.flags.f_contiguous):
            pass  # keep the strides from the array interface as-is
        else:
            # Not contiguous: pick a contiguous layout for the copy.
            assert strides
            if (strides[0] == array.itemsize):
                strides = cls.numpy_strides_for_fortran(shape=shape, typestr=typestr)
            else:
                strides = None  # default C-contiguous layout
        inst = cls.create_new(shape=shape, strides=strides, typestr=typestr)
        inst.create_numpy_array()[...] = array
        assert (inst._get_sanity_check_flag_ref().value == 42)
        assert inst.is_in_use()
        return inst

    @classmethod
    def create_new(cls, shape, strides, typestr):
        """
        Reuse an unused server instance (reallocating its memory if too small),
        otherwise create a fresh instance.

        :rtype: SharedNumpyArray
        """
        needed_mem_size = cls.needed_mem_size(shape=shape, typestr=typestr)
        with cls.ServerLock:
            for inst in cls.ServerInstances:
                assert isinstance(inst, SharedNumpyArray)
                assert (inst._get_sanity_check_flag_ref().value == 42)
                if inst.is_in_use():
                    continue
                if (inst.mem.size < needed_mem_size):
                    # Existing segment too small: reallocate.
                    inst._init_mem(shape=shape, typestr=typestr)
                inst._set_new_array_id()
                inst._set_is_used(1)
                inst._set_numpy_format(shape=shape, strides=strides, typestr=typestr)
                return inst
        return cls(shape=shape, strides=strides, typestr=typestr)

    @classmethod
    def _get_new_array_id(cls):
        # Counter increment; callers are expected to hold ServerLock or be
        # otherwise serialized.
        array_id = cls.ServerArrayId
        cls.ServerArrayId += 1
        return array_id

    def _set_new_array_id(self):
        # Store the new id both on the instance and in the shared metadata.
        assert self.is_server
        self.array_id = self._get_new_array_id()
        self._get_array_id_ref().value = self.array_id

    def __init__(self, shape, strides, typestr, mem=None, array_id=None):
        """
        :param shape: numpy array shape
        :param strides: numpy array strides, or None for default layout
        :param str typestr: numpy dtype string
        :param SharedMem|None mem: if given, we are a client attaching to
            existing shared memory; otherwise we are the server and allocate
        :param int|None array_id: required iff mem is given
        """
        if (not mem):
            assert (array_id is None)
            if (len(self.ServerInstances) >= SharedMemNumpyConfig['max_server_instances']):
                raise self.TooMuchInstances(('too much instances (%i)' % len(self.ServerInstances)))
            self.is_server = True
            self._init_mem(shape=shape, typestr=typestr)
            self._set_new_array_id()
            self._set_is_used(1)
        else:
            assert (array_id is not None)
            self.is_server = False
            self.array_id = array_id
            mem_size = self.needed_mem_size(shape=shape, typestr=typestr)
            assert isinstance(mem, SharedMem)
            assert (mem.size >= mem_size)
            assert (mem.shmid > 0)
            assert (mem.ptr > 0)
            self.mem = mem
            # Validate the metadata which the server wrote into the segment.
            assert (self._get_sanity_check_flag_ref().value == 42)
            assert (self._get_array_id_ref().value == self.array_id)
            assert self.is_in_use()
        self._set_numpy_format(shape=shape, strides=strides, typestr=typestr)
        if self.is_server:
            with self.ServerLock:
                self.ServerInstances.add(self)

    def _set_numpy_format(self, shape, strides, typestr):
        itemsize = numpy.dtype(typestr).itemsize
        if strides:
            assert all([(st > 0) for st in strides])
            # The strides must describe a dense (gap-free) layout of the array.
            assert ((sum([(st * (sh - 1)) for (st, sh) in zip(strides, shape)]) + itemsize) == (numpy.prod(shape) * itemsize))
        self.shape = shape
        self.strides = strides
        self.typestr = typestr

    def _init_mem(self, shape, typestr):
        # (Re)allocate the shared memory segment for the given array format.
        assert self.is_server
        if self.mem:
            self.mem.remove()
            self.mem = None
        assert (numpy.prod(shape) > 0)
        # Round up to a power of two so segments are more likely reusable later.
        mem_size = next_power_of_two(self.needed_mem_size(shape=shape, typestr=typestr))
        mem_size = max(SharedMemNumpyConfig['min_shared_mem_size'], mem_size)
        self.mem = SharedMem(size=mem_size)
        self._get_sanity_check_flag_ref().value = 42

    def get_numpy_array_data_ptr(self):
        """
        :return: address of the raw array data (after the metadata header)
        :rtype: int
        """
        assert (self.mem.ptr > 0)
        return (self.mem.ptr + self.ExtraSpaceBytes)

    @property
    def __array_interface__(self):
        # Numpy array interface protocol, so numpy.array(self, copy=False)
        # creates a view onto the shared memory.
        assert self.shape
        return {'data': (self.get_numpy_array_data_ptr(), False), 'shape': self.shape, 'strides': self.strides, 'typestr': self.typestr, 'version': 3}

    def create_numpy_array(self):
        """
        :return: numpy array which is a view onto the shared memory (no copy)
        :rtype: numpy.ndarray
        """
        assert (self._get_sanity_check_flag_ref().value == 42)
        assert (self._get_array_id_ref().value == self.array_id)
        assert self.is_in_use()
        a = numpy.array(self, copy=False)
        assert (a.__array_interface__['data'][0] == self.get_numpy_array_data_ptr())
        assert (not a.flags.owndata), ('a.__array_interface__ = %r' % a.__array_interface__)
        assert (a.base is self)
        assert ((a.nbytes + self.ExtraSpaceBytes) <= self.mem.size)
        # Dense layout check, same as in _set_numpy_format.
        assert ((sum([(st * (sh - 1)) for (st, sh) in zip(a.strides, a.shape)]) + a.itemsize) == (numpy.prod(a.shape) * a.itemsize) == a.nbytes)
        return a

    # The first 24 bytes of the segment hold three uint64 metadata fields:
    # offset 0: sanity-check flag (42), offset 8: array id, offset 16: in-use flag.

    def _get_sanity_check_flag_ref(self):
        assert (self.mem.ptr > 0)
        import ctypes
        return ctypes.cast(ctypes.c_void_p(self.mem.ptr), ctypes.POINTER(ctypes.c_uint64)).contents

    def _get_array_id_ref(self):
        assert (self.mem.ptr > 0)
        import ctypes
        return ctypes.cast(ctypes.c_void_p((self.mem.ptr + 8)), ctypes.POINTER(ctypes.c_uint64)).contents

    def _get_in_use_flag_ref(self):
        assert (self.mem.ptr > 0)
        import ctypes
        return ctypes.cast(ctypes.c_void_p((self.mem.ptr + 16)), ctypes.POINTER(ctypes.c_uint64)).contents

    def _set_is_used(self, n):
        """
        :param int n: 1 = in use, 0 = free for reuse by the server
        """
        self._get_in_use_flag_ref().value = n

    def is_in_use(self):
        """
        :rtype: bool
        """
        return (self._get_in_use_flag_ref().value > 0)

    def set_unused(self):
        """
        Called by the client when done with the array: marks the segment as
        reusable for the server and detaches our mapping. No-op on the server.
        """
        if self.is_server:
            return
        if self.mem:
            self._set_is_used(0)
            self.mem.remove()
            self.mem = None

    def __getstate__(self):
        return {'shape': self.shape, 'strides': self.strides, 'typestr': self.typestr, 'mem': self.mem, 'array_id': self.array_id}

    def __setstate__(self, state):
        self.__init__(**state)

    def __del__(self):
        # No-op on the server (set_unused returns early); the server keeps
        # instances in ServerInstances for reuse.
        self.set_unused()

    def __repr__(self):
        return ('<%s is_server=%r state=%r>' % (self.__class__.__name__, self.is_server, self.__getstate__()))
|
def attrChain(base, *attribs, **kwargs):
    """
    Follow a chain of attribute lookups, e.g. ``attrChain(obj, "a", "b")``
    behaves like ``obj.a.b``, except that as soon as any intermediate value
    is None or an attribute is missing, ``default`` is returned.

    :param base: start object
    :param attribs: attribute names to follow
    :param kwargs: only "default" is used (default None); other kwargs are ignored
    :return: the final attribute value, or the default
    """
    default = kwargs.get("default", None)
    current = base
    for name in attribs:
        if current is None:
            return default
        current = getattr(current, name, None)
    if current is None:
        return default
    return current
|
def funcCall(attrChainArgs, args=()):
    """
    Resolve a callable via :func:`attrChain` and call it.

    :param attrChainArgs: arguments for :func:`attrChain` (base object + attribute names)
    :param args: positional arguments for the call
    :return: whatever the resolved callable returns
    """
    func = attrChain(*attrChainArgs)
    return func(*args)
|
def get_func_closure(f):
    """
    :param types.FunctionType f:
    :return: the closure cells of the function, or None if it has no closure
    :rtype: tuple|None
    """
    return f.__closure__
|
def get_func_tuple(f):
    """
    :param types.FunctionType f:
    :return: (code, globals, name, defaults, closure) of the function,
        e.g. usable to reconstruct it via types.FunctionType
    :rtype: tuple
    """
    return (
        f.__code__,
        f.__globals__,
        f.__name__,
        f.__defaults__,
        f.__closure__,
    )
|
def makeFuncCell(value):
    """
    Create a closure cell object containing the given value
    (e.g. to manually construct a function closure).

    :param value: value to wrap
    :return: cell with ``cell_contents == value``
    :rtype: types.CellType
    """
    # Capture `value` in a lambda closure and return the resulting cell directly
    # (no need to go through the get_func_closure helper).
    return (lambda: value).__closure__[0]
|
def getModuleDict(modname, path=None):
    """
    :param str modname: such that "import <modname>" would work
    :param list[str] path: extra sys.path entries, used if the plain import fails
    :return: the dict of the mod
    :rtype: dict[str]
    """
    try:
        mod = import_module(modname)
    except ImportError:
        # Maybe the module lives in one of the given extra paths.
        assert path
        for extra_path in path:
            if extra_path not in sys.path:
                sys.path.append(extra_path)
        mod = import_module(modname)
    return mod.__dict__
|
def getModNameForModDict(obj):
    """
    :type obj: dict
    :rtype: str | None
    :returns The module name or None. It will not return '__main__' in any case
    because that likely will not be the same in the unpickling environment.
    Also see: https://stackoverflow.com/questions/56171796/
    """
    if "__name__" not in obj:
        return None  # not a module dict
    mod_name = obj["__name__"]
    if mod_name == "__main__":
        return None
    if mod_name not in sys.modules:
        return None  # not imported (or wrong name)
    module = sys.modules[mod_name]
    # Only accept it if the dict really belongs to that module.
    return mod_name if module.__dict__ is obj else None
|
def getNormalDict(d):
    """
    :type d: dict[str] | dictproxy
    :rtype: dict[str]
    It also removes getset_descriptor. New-style classes have those.
    """
    return {
        key: value
        for (key, value) in d.items()
        if not isinstance(value, types.GetSetDescriptorType)
    }
|
def assign_obj_attribs(obj, d: Dict[str, Any]):
    """
    Set every (key, value) of *d* as an attribute on *obj*.

    Note that ``obj.__dict__.update(d)`` does not always work,
    e.g. when obj is a type (then obj.__dict__ is a readonly mappingproxy).

    :param obj: target object
    :param d: attribute name -> value
    :return: obj
    """
    for key, value in d.items():
        setattr(obj, key, value)
    return obj
|
def make_numpy_ndarray_fromstring(s, dtype, shape):
    """
    Reconstruct a numpy array from its raw bytes (e.g. for unpickling).

    :param bytes s: raw binary data
    :param str|numpy.dtype dtype:
    :param tuple[int] shape:
    :return: writable array of the given shape, with the data copied from s
    :rtype: numpy.ndarray
    """
    # numpy.fromstring is deprecated since NumPy 1.14 and removed in NumPy 2.0.
    # frombuffer gives a read-only view on bytes, so copy to keep the old
    # writable-copy semantics of fromstring.
    return numpy.frombuffer(s, dtype=dtype).reshape(shape).copy()
|
def use_shared_mem_for_numpy_array(obj):
    """
    :param numpy.ndarray obj:
    :return: whether this array should be transferred via shared memory
        (either it already lives there, or it is big enough and shm is enabled)
    :rtype: bool
    """
    assert isinstance(obj, numpy.ndarray)
    if obj.shape == ():
        # Scalars are cheap enough to pickle directly.
        return False
    if isinstance(obj.base, SharedNumpyArray):
        # Already backed by shared memory.
        assert obj.base.is_in_use()
        return True
    if not SharedMemNumpyConfig["enabled"]:
        return False
    return obj.nbytes >= SharedMemNumpyConfig["auto_pickling_min_size"]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.