# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from collections import defaultdict
import difflib
from functools import reduce
from glob import glob
from itertools import chain, product
import operator
import os
import re
import shutil
import subprocess
from time import localtime, strftime
import traceback
import numpy as np
from tqdm import tqdm
from .. import fmtxt
from .._config import CONFIG
from .._text import enumeration, n_of, plural
from .._utils import as_sequence, LazyProperty, ask
from .._utils.com import Notifier, NotNotifier
from .definitions import check_names, compound
def _etree_expand(node, state):
for tk, tv in node.items():
if tk == '.':
continue
for k, v in state.items():
name = '{%s}' % tk
if str(v).startswith(name):
tv[k] = {'.': v.replace(name, '')}
if len(tv) > 1:
_etree_expand(tv, state)
def _etree_node_repr(node, name, indent=0):
head = ' ' * indent
out = [(name, head + node['.'])]
for k, v in node.items():
if k == '.':
continue
out.extend(_etree_node_repr(v, k, indent=indent + 3))
return out
class LayeredDict(dict):
"""Dictionary which can store and restore states"""
def __init__(self):
self._states = []
dict.__init__(self)
def __repr__(self):
return ("<LayeredDict with %i stored states:\n"
"%r>" % (len(self._states), dict.__repr__(self)))
def get_stored(self, key, level, default=None):
"""Retrieve a field value from any level
Parameters
----------
key : str
            The field name (dictionary key).
level : int
The level from which to retrieve the value. -1 = the current level.
"""
return self._states[level].get(key, default)
def restore_state(self, state=-1, discard_tip=True):
"""Restore a previously stored state
Parameters
----------
state : int | dict
Index of the state which to restore (specified as index into a
list of stored states, i.e., negative values access recently
stored states).
discard_tip : bool
Discard the relevant state after restoring it. All states stored
later are discarded either way.
See Also
--------
.get_stored(): Retrieve a stored value without losing stored states
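        Examples
        --------
        A minimal sketch of the store/restore cycle (values are illustrative):
        >>> d = LayeredDict()
        >>> d['a'] = 1
        >>> d.store_state()
        >>> d['a'] = 2
        >>> d.restore_state()
        >>> d['a']
        1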
"""
if isinstance(state, int):
index = state
state = self._states[index]
if discard_tip:
del self._states[index:]
elif index != -1: # -1 + 1 = 0
del self._states[index + 1:]
elif not isinstance(state, dict):
raise TypeError("state needs to be either int or dict, got %r" %
(state,))
self.clear()
self.update(state)
def store_state(self):
"Store the current state"
self._states.append(self.copy())
class _TempStateController:
def __init__(self, experiment):
self.experiment = experiment
def __enter__(self):
self.experiment._store_state()
def __exit__(self, exc_type, exc_value, traceback):
self.experiment._restore_state()
class TreeModel:
"""
A hierarchical collection of format strings and field values
Notes
-----
Any subclass should make sure to call the ``._store_state()`` method at the
end of initialization.
"""
owner = None # email address as string (for notification)
_auto_debug = False # in notification block
_fmt_pattern = re.compile(r'\{([\w-]+)\}')
# a dictionary of static templates (i.e., templates that do not have any hooks)
_templates = {}
defaults = {}
_repr_args = ()
def __init__(self, **state):
# scaffold for state
self._fields = LayeredDict()
self._field_values = LayeredDict()
self._terminal_fields = []
self._secondary_cache = defaultdict(tuple) # secondary cache-files
self._repr_kwargs = []
self._repr_kwargs_optional = []
# scaffold for hooks
self._compound_members = {}
self._compounds = defaultdict(list)
self._eval_handlers = defaultdict(list)
self._post_set_handlers = defaultdict(list)
self._set_handlers = {}
self._slave_fields = defaultdict(list)
self._slave_handlers = {}
# construct initial state: make all defaults available, then set as
# many values as we can
self._defaults = dict(self.defaults)
self._defaults.update(state)
for k, v in self._templates.items():
if v is None or isinstance(v, str):
self._register_constant(k, v)
elif isinstance(v, tuple):
self._register_field(k, v, v[0], allow_empty=True)
else:
raise TypeError(f"Invalid templates field value: {v!r}. Need None, tuple or string")
if self.owner:
task = self.__class__.__name__
self.notification = Notifier(self.owner, task, self._crash_report,
self._auto_debug)
else:
self.notification = NotNotifier()
def __repr__(self):
args = [f'{self._fields[arg]!r}' for arg in self._repr_args]
kwargs = [(arg, self._fields[arg]) for arg in self._repr_kwargs]
no_initial_state = len(self._fields._states) == 0
for k in self._repr_kwargs_optional:
v = self._fields[k]
if no_initial_state or v != self._fields.get_stored(k, level=0):
kwargs.append((k, v))
args.extend(f'{k}={v!r}' for k, v in kwargs)
return f"{self.__class__.__name__}({", ".join(args)})"
def _bind_eval(self, key, handler):
self._eval_handlers[key].append(handler)
def _bind_post_set(self, key, handler):
handlers = self._post_set_handlers[key]
if handler not in handlers:
handlers.append(handler)
def _bind_set(self, key, handler):
if key in self._set_handlers:
raise KeyError("set-handler for %r already set" % key)
self._set_handlers[key] = handler
def _crash_report(self):
out = []
# try:
# source = inspect.getsource(self.__class__)
# except Exception as e:
# source = "Failed to retrieve source:\n" + traceback.format_exc(e)
# out.append(source)
try:
tree = str(self.show_state())
except Exception as e:
tree = "Failed to retrieve state:\n" + traceback.format_exc(e)
out.append(tree)
# package versions
from .. import __version__
import mne
import scipy
out.append('\n'.join(("Eelbrain %s" % __version__,
"mne-python %s" % mne.__version__,
"SciPy %s" % scipy.__version__,
"NumPy %s" % np.__version__)))
return out
def _find_missing_fields(self):
"""Check that all field names occurring in templates are valid entries
Raises
------
KeyError
If any field names occurring in templates are not registered fields.
"""
# find field names occurring in field values but not as fields
missing = set()
for temp in self._fields.values():
for field in self._fmt_pattern.findall(temp):
if field not in self._fields:
missing.add(field)
if missing:
raise KeyError("The following fields occur in templates but "
"are undefined: %s" % ', '.join(sorted(missing)))
def _register_compound(self, key, elements):
"""Register a field that is composed out of other fields
The compound always reflects ``' '.join(elements)`` including only
elements that are not empty.
Parameters
----------
key : str
The name of the compound field.
elements : tuple of str
The field names of the elements.
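        Examples
        --------
        A sketch of the value a compound reflects (member values are
        hypothetical; empty elements are dropped from the join):
        >>> items = ['tsss', '', 'man']   # values of the member fields
        >>> ' '.join(i for i in items if i)
        'tsss man'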
"""
self._compound_members[key] = elements
for e in elements:
self._compounds[e].append(key)
self._bind_post_set(e, self._update_compounds)
self._fields[key] = None
self._update_compound(key)
def _register_constant(self, key, value):
value = self._defaults.get(key, value)
if value is None:
raise ValueError("The %r field needs to be set as default" % key)
self._fields[key] = value
def _register_field(self, key, values=None, default=None, set_handler=None,
eval_handler=None, post_set_handler=None,
depends_on=None, slave_handler=None,
allow_empty=False, repr=None):
"""Register an iterable field
Parameters
----------
key : str
Name of the field.
values : None | sequence of str
Possible values for this field, if known.
default : None | str
Set the default value (if None, the first element in values).
set_handler : None | callable
Function to call instead of updating the state value. The return
value of the set_handler is sent to the post_set_handler.
eval_handler : None | callable
Function to use for evaluating a value before setting. Can be
called without actually setting the value; any parameter changes
need to be evaluated in post_set_handlers.
post_set_handler : None | callable
Function to call after the value is changed. Needs to be able to
handle non-existing values for ``e.set(..., vmatch=False)`` calls.
depends_on : str | sequence of str
Slave fields: Fields in depends_on trigger change in ``key``.
slave_handler : func
Slave fields: Function that determines the new value of ``key``.
allow_empty : bool
Allow empty string in ``values``.
repr : bool
By default, fields are shown in ``repr`` if they are different from
the value at initialization. Set to ``True`` to always show them
(as long as there are at least 2 ``values``).
"""
if key in self._fields:
raise KeyError("Field already exists: %r" % key)
if depends_on is not None:
if (set_handler is not None or eval_handler is not None or
post_set_handler is not None):
raise RuntimeError("Slave values can't have other handlers")
elif slave_handler is None:
raise RuntimeError("Slave value requires slave_handler")
self._register_slave_field(key, depends_on, slave_handler)
if default is None:
default = slave_handler(self._fields)
if set_handler is not None:
self._bind_set(key, set_handler)
if eval_handler is not None:
self._bind_eval(key, eval_handler)
if post_set_handler is not None:
self._bind_post_set(key, post_set_handler)
default = self._defaults.get(key, default)
if values:
values = tuple(values)
check_names(values, key, allow_empty)
if default is None:
default = values[0]
elif default not in values:
raise ValueError(f"Default {default!r} for {key!r} not in values {values}")
self._field_values[key] = values
# repr
if key in self._repr_args:
pass
elif repr is True:
if values and len(values) > 1:
self._repr_kwargs.append(key)
elif repr is None:
if values and len(values) > 1:
self._repr_kwargs_optional.append(key)
elif repr is not False:
raise TypeError(f"repr={repr!r}")
self._terminal_fields.append(key)
self._fields[key] = ''
if default is not None:
self.set(**{key: default})
def _register_slave_field(self, key, depends_on, handler):
"""Register a field that strictly depends on one or more other fields
Parameters
----------
key : str
Field name.
depends_on : str | sequence of str
Fields that trigger change.
handler : func
Function that determines the new value.
Notes
-----
Restrictions:
- Slave fields can not have any other handlers
- Slave fields can not depend on other slave fields
"""
if isinstance(depends_on, str):
depends_on = (depends_on,)
for dep in depends_on:
self._slave_fields[dep].append(key)
self._slave_handlers[key] = handler
self._fields[key] = handler(self._fields)
def expand_template(self, temp, keep=()):
"""Expand all constant variables in a template
Parameters
----------
temp : str
Template or name of the template which should be expanded.
keep : container (implements __contains__)
Names of the variables which should not be expanded.
Returns
-------
formatted_temp : str
Template with all variables replaced by their values, except
variables which have entries in field_values or in ``keep``.
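        Examples
        --------
        A hypothetical illustration (template and values are made up; assume
        ``root`` is a constant and ``subject`` has stored field values):
        >>> e.expand_template('{root}/{subject}_info.txt')
        '/data/study/{subject}_info.txt'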
"""
temp = self._fields.get(temp, temp)
while True:
stop = True
for name in self._fmt_pattern.findall(temp):
if (name in keep) or (self._field_values.get(name, False)):
pass
else:
temp = temp.replace('{%s}' % name, self._fields[name])
stop = False
if stop:
break
return temp
def find_keys(self, temp, root=True):
"""Find all terminal field names that are relevant for a template.
Parameters
----------
temp : str
Template (or field name) for which to find terminal field names.
root : bool
Include "root" if present (default True).
Returns
-------
keys : list
All terminal field names that are relevant for formatting ``temp``.
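        Examples
        --------
        A hypothetical illustration (template and field names are made up):
        >>> e.find_keys('raw-file')
        ['root', 'subject', 'session']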
"""
if temp in self._terminal_fields:
return [temp]
if temp in self._compound_members:
temporary_keys = list(self._compound_members[temp])
else:
temp = self._fields.get(temp, temp)
temporary_keys = self._fmt_pattern.findall(temp)
keys = []
while temporary_keys:
key = temporary_keys.pop(0)
if key == 'root':
if root:
keys.append('root')
elif key in self._terminal_fields:
keys.append(key)
else:
keys.extend(self.find_keys(key, root))
# remove duplicates
return list(dict.fromkeys(keys))
def format(self, string, vmatch=True, **kwargs):
"""Format a string (i.e., replace any '{xxx}' fields with their values)
Parameters
----------
string : str
Template string.
vmatch : bool
            For fields with pre-defined values, only allow existing values.
others :
State parameters.
Returns
-------
formatted_string : str
            ``string`` formatted with current state values.
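        Examples
        --------
        A hypothetical illustration (field names and state values are made
        up):
        >>> e.format('{subject}_{session}')
        'S01_reading'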
"""
self.set(match=vmatch, **kwargs)
while self._fmt_pattern.search(string):
string = string.format(**self._fields)
return string
def get(self, temp, **state):
return self.format('{%s}' % temp, **state)
def _get_rel(self, temp, start):
"Get the path of ``temp`` relative to ``start`` (both field names)"
abs_ = self.get(temp)
start_ = self.get(start)
return os.path.relpath(abs_, start_)
def get_field_values(self, field, exclude=()):
"""Find values for a field taking into account exclusion
Parameters
----------
field : str
Field for which to find values.
exclude : list of str
Exclude these values.
"""
values = self._field_values[field]
if isinstance(exclude, str):
exclude = (exclude,)
if exclude:
values = [v for v in values if v not in exclude]
else:
values = list(values)
return values
def iter(self, fields, exclude=None, values=None, progress_bar=None, **constants):
"""
Cycle the experiment's state through all values on the given fields
Parameters
----------
fields : sequence | str
Field(s) over which should be iterated.
exclude : dict {str: iterator over str}
Exclude values from iteration (``{field: values_to_exclude}``).
values : dict {str: iterator over str}
Fields with custom values to iterate over (instead of the
corresponding field values) with {name: (sequence of values)}
entries.
progress_bar : str
Message to show in the progress bar.
...
Fields with constant values throughout the iteration.
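        Examples
        --------
        A hypothetical loop (field name and values are made up):
        >>> for subject in e.iter('subject'):
        ...     print(subject)
        S01
        S02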
"""
if isinstance(fields, str):
fields = (fields,)
yield_str = True
else:
yield_str = False
# find actual fields to iterate over:
iter_fields = []
for field in fields:
if field in constants:
continue
iter_fields.extend(f for f in self.find_keys(field) if f not in constants)
# check values and exclude
if values:
bad = set(values).difference(iter_fields)
if bad:
raise ValueError(f"values={values!r}: keys that are not iterated over ({", ".join(bad)})")
else:
values = {}
if exclude:
bad = set(exclude).difference(iter_fields)
if bad:
raise ValueError(f"exclude={exclude!r}: keys that are not iterated over ({", ".join(bad)})")
else:
exclude = {}
# set constants (before .get_field_values() call)
self.set(**constants)
# gather values to iterate over
v_lists = []
for field in iter_fields:
if field in values:
v_lists.append(as_sequence(values[field]))
else:
exclude_ = exclude.get(field, None)
v_lists.append(self.get_field_values(field, exclude_))
if len(v_lists):
n = reduce(operator.mul, map(len, v_lists))
with self._temporary_state:
disable = progress_bar is None or CONFIG['tqdm']
for v_list in tqdm(product(*v_lists), progress_bar, n, disable=disable):
self._restore_state(discard_tip=False)
self.set(**dict(zip(iter_fields, v_list)))
if yield_str:
yield self.get(fields[0])
else:
yield tuple(self.get(f) for f in fields)
else:
yield ()
def iter_temp(self, temp, exclude=None, values={}, **constants):
"""
Iterate through all paths conforming to a template given in ``temp``.
Parameters
----------
temp : str
Name of a template in the MneExperiment.templates dictionary, or
a path template with variables indicated as in ``'{var_name}'``
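        Examples
        --------
        A hypothetical loop over per-subject paths (template name and paths
        are made up):
        >>> for path in e.iter_temp('info-file'):
        ...     print(path)
        /data/study/S01_info.txt
        /data/study/S02_info.txt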
"""
# if the name is an existing template, retrieve it
temp = self.expand_template(temp, values.keys())
# find variables for iteration
variables = set(self._fmt_pattern.findall(temp))
variables.difference_update(constants)
for _ in self.iter(variables, exclude, values, **constants):
path = temp.format(**self._fields)
yield path
def _partial(self, temp, skip=()):
"Format a template while leaving some slots unfilled"
skip = set(skip)
fields = self._fields.copy()
fields.update({k: '{%s}' % k for k in skip})
string = '{%s}' % temp
while set(self._fmt_pattern.findall(string)).difference(skip):
string = string.format(**fields)
return string
def _copy_state(self):
"""Copy of the state that can be used with ``._restore_state()``"""
return self._fields.copy(), self._field_values.copy()
def _restore_state(self, state=-1, discard_tip=True):
"""Restore a previously stored state
Parameters
----------
state : int
Index of the state which to restore (specified as index into a
list of stored states, i.e., negative values access recently
stored states).
discard_tip : bool
Discard the relevant state after restoring it. All states stored
later are discarded either way.
"""
if isinstance(state, int):
s1 = s2 = state
else:
s1, s2 = state
self._fields.restore_state(s1, discard_tip)
self._field_values.restore_state(s2, discard_tip)
def reset(self):
"""Reset all field values to the state at initialization
This function can be used in cases where the same MneExperiment instance
is used to perform multiple independent operations, where parameters set
during one operation should not affect the next operation.
"""
self._restore_state(0, False)
def set(self, match=True, allow_asterisk=False, **state):
"""Set the value of one or more fields.
Parameters
----------
match : bool
For fields with pre-defined values, only allow valid values (default
``True``).
allow_asterisk : bool
If a value contains ``'*'``, set the value without the normal value
evaluation and checking mechanisms (default ``False``).
... :
            Fields and values to set. Invalid fields raise a KeyError.
            Unless ``match`` is False, invalid values raise a ValueError.
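        Examples
        --------
        Hypothetical calls (field names and values are made up):
        >>> e.set(subject='S01', session='reading')
        >>> e.set(subject='S99', match=False)  # bypass value checking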
"""
if not state:
return
# expand compounds
if state.pop('expand_compounds', True):
for k in list(state):
if k in self._compound_members:
fields = self._compound_members[k]
v = state.pop(k)
values = v.split(' ')
for i, field in enumerate(fields):
field_values = self._field_values[field]
vi = values[i] if len(values) > i else None
if vi in field_values:
continue
elif '' in field_values:
values.insert(i, '')
else:
raise ValueError(f"{k}={v!r}")
if len(values) != len(fields):
raise ValueError(f"{k}={v!r}")
state.update(zip(fields, values))
handled_state = {} # fields with special set handlers
for k in list(state):
v = state[k]
if k not in self._fields:
raise TypeError(f"{k}={v!r}: No template named {k!r}")
elif v is None:
state.pop(k)
continue
elif k in self._set_handlers:
handled_state[k] = self._set_handlers[k](state.pop(k))
continue
elif not isinstance(v, str):
raise TypeError(f"{k}={v!r}: Values have to be strings")
elif '*' in v and allow_asterisk:
continue
# eval values
eval_handlers = self._eval_handlers[k]
if eval_handlers:
for handler in eval_handlers:
try:
v = handler(v)
except ValueError:
if match:
raise
if not isinstance(v, str):
raise RuntimeError(f"Invalid conversion from handler {handler}: {k}={v!r}")
state[k] = v
elif match and k in self._field_values and v not in self._field_values[k]:
matches = difflib.get_close_matches(v, self._field_values[k], 1)
if matches:
alt = f"Did you mean {matches[0]!r}? "
else:
alt = ''
raise ValueError(f"{k}={v!r}. {alt}To see all valid values use e.show_fields(); To set a non-existent value, use e.set({k}={v!r}, match=False).")
self._fields.update(state)
# fields depending on changes in other fields
slave_state = {}
for state_key in set(state).union(handled_state).intersection(self._slave_fields):
for slave_key in self._slave_fields[state_key]:
if slave_key not in slave_state:
v = self._slave_handlers[slave_key](self._fields)
if v is not None:
slave_state[slave_key] = v
self._fields.update(slave_state)
# call post_set handlers
for k, v in chain(state.items(), handled_state.items(), slave_state.items()):
for handler in self._post_set_handlers[k]:
handler(k, v)
def show_fields(self, str_out=False):
"""
        Generate a table for all iterable fields and their values.
Parameters
----------
str_out : bool
Return the table as a string (instead of printing it).
"""
lines = []
for key in self._field_values:
values = list(self._field_values[key])
line = f'{key}:'
head_len = len(line) + 1
while values:
v = repr(values.pop(0))
if values:
v += ','
if len(v) < 80 - head_len:
line += ' ' + v
else:
lines.append(line)
line = ' ' * head_len + v
if not values:
lines.append(line)
table = '\n'.join(lines)
if str_out:
return table
else:
print(table)
def show_state(self, temp=None, empty=False, hide=()):
"""List all top-level fields and their values
(Top-level fields are fields whose values do not contain templates)
Parameters
----------
temp : None | str
Only show variables relevant to this template.
empty : bool
Show empty variables (items whose value is the empty string '').
hide : collection of str
State variables to hide.
Returns
-------
state : Table
Table of (relevant) variables and their values.
"""
table = fmtxt.Table('lll')
table.cells('Key', '*', 'Value')
table.caption('*: Value is modified from initialization state.')
table.midrule()
if temp is None:
keys = chain(self._repr_kwargs, self._repr_kwargs_optional)
else:
keys = self.find_keys(temp)
for k in sorted(keys):
if k in hide:
continue
v = self._fields[k]
if v != self._fields.get_stored(k, level=0):
mod = '*'
else:
mod = ''
if empty or mod or v:
table.cells(k, mod, repr(v))
return table
def show_tree(self, root='root', fields=None):
"""
        Print a tree of the file hierarchy implicit in the templates
Parameters
----------
root : str
Name of the root template (e.g., 'besa-root').
fields : list of str
Which fields to include in the tree (default is all).
"""
if fields is None:
fields = self._fields
else:
# find all implied fields
new_fields = set(fields)
fields = {}
while new_fields:
k = new_fields.pop()
fields[k] = v = self._fields[k]
new_fields.update([f for f in self._fmt_pattern.findall(v) if f not in fields])
tree = {'.': self.get(root)}
root_temp = '{%s}' % root
for k, v in fields.items():
if str(v).startswith(root_temp):
tree[k] = {'.': v.replace(root_temp, '')}
_etree_expand(tree, fields)
nodes = _etree_node_repr(tree, root)
name_len = max(len(n) for n, _ in nodes)
path_len = max(len(p) for _, p in nodes)
pad = ' ' * (80 - name_len - path_len)
print('\n'.join(n.ljust(name_len) + pad + p.ljust(path_len) for n, p in nodes))
def _store_state(self):
"""Store the current state
See also
--------
._restore_state() : restore a previously stored state
"""
self._fields.store_state()
self._field_values.store_state()
@LazyProperty
def _temporary_state(self):
return _TempStateController(self)
def _update_compound(self, key):
items = [self.get(k) for k in self._compound_members[key]]
self.set(**{key: compound(items)}, expand_compounds=False)
def _update_compounds(self, key, _):
for compound in self._compounds[key]:
self._update_compound(compound)
class FileTree(TreeModel):
""":class:`TreeModel` subclass for a file system hierarchy"""
_repr_args = ('root',)
_safe_delete = 'root' # directory from which to rm without warning
def __init__(self, **state):
TreeModel.__init__(self, **state)
self._make_handlers = {}
self._cache_handlers = {}
self._register_field('root', eval_handler=self._eval_root)
def _bind_cache(self, key, handler):
"""Bind a cache function to a ``*-file`` key
The cache function is called every time the file name is retrieved and
should recreate the file if it is outdated.
The cache function can return the filename of the created file since
it is called every time the specific file is requested. Note that this
causes problems for ``glob()``.
"""
if key in self._cache_handlers:
raise RuntimeError(f"Cache handler for {key!r} already defined")
elif key in self._make_handlers:
raise RuntimeError(f"Already defined make handler for {key!r}")
self._cache_handlers[key] = handler
def _bind_make(self, key, handler):
"""Bind a make function to a ``*-file`` key
The make function is called only when the file name is retrieved and
the file does not exist.
"""
if key in self._cache_handlers:
raise RuntimeError(f"Already defined cache handler for {key!r}")
elif key in self._make_handlers:
raise RuntimeError(f"Make handler for {key!r} already defined")
self._make_handlers[key] = handler
@staticmethod
def _eval_root(root):
root = os.path.abspath(os.path.expanduser(root))
if root != '':
root = os.path.normpath(root)
return root
def get(self, temp, fmatch=False, vmatch=True, match=True, mkdir=False,
make=False, **kwargs):
"""
Retrieve a formatted template
        With ``fmatch=True``, asterisks (``'*'``) are expanded to match a file,
        and an error is raised if there is not exactly one match. With
        ``mkdir=True``, the directory containing the file is created if it
        does not exist.
Parameters
----------
temp : str
Name of the requested template.
fmatch : bool
"File-match": If the template contains asterisk ('*'), use glob to
fill it in. An IOError is raised if the pattern does not match
exactly one file.
vmatch : bool
"Value match": Require existence of the assigned value (only
applies for fields with stored values).
match : bool
Do any matching (i.e., match=False sets fmatch as well as vmatch
to False).
mkdir : bool
If the directory containing the file does not exist, create it.
make : bool
If a requested file does not exists, make it if possible.
kwargs :
Set any state values.
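        Examples
        --------
        A hypothetical call (template name, fields and path are made up):
        >>> e.get('info-file', subject='S01', mkdir=True)
        '/data/study/S01_info.txt'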
"""
if not match:
fmatch = vmatch = False
path = TreeModel.get(self, temp, vmatch=vmatch, **kwargs)
path = os.path.expanduser(path)
# assert the presence of the file
if fmatch and ('*' in path):
paths = glob(path)
if len(paths) == 0 and make and temp in self._make_handlers:
self._make_handlers[temp]()
paths = glob(path)
if len(paths) == 1:
path = paths[0]
elif len(paths) > 1:
raise IOError(f"More than one files match {path!r}: {paths}")
else:
raise IOError(f"No file found for {path!r}")
# create the directory
if mkdir:
if temp.endswith('dir'):
dirname = path
else:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
root = self.get('root')
if root == '':
raise IOError("Prevented from creating directories because root is not set")
elif os.path.exists(root):
os.makedirs(dirname)
else:
raise IOError(f"Prevented from creating directories because root does not exist: {root!r}")
# make the file
if make:
if temp in self._cache_handlers:
path = self._cache_handlers[temp]() or path
elif not os.path.exists(path):
if temp in self._make_handlers:
with self._temporary_state:
self._make_handlers[temp]()
elif temp.endswith('-dir'):
os.makedirs(path)
else:
raise RuntimeError(f"No make handler for {temp!r}")
return path
def glob(self, temp, inclusive=False, **state):
"""Find all files matching a certain pattern
Parameters
----------
temp : str
Name of the path template for which to find files.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
See Also
--------
copy : Copy files.
move : Move files.
rm : Delete files.
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
Uses :func:`glob.glob`.
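        Examples
        --------
        A hypothetical call matching all subjects (template name and paths are
        made up):
        >>> e.glob('info-file', subject='*')
        ['/data/study/S01_info.txt', '/data/study/S02_info.txt']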
"""
pattern = self._glob_pattern(temp, inclusive, **state)
return glob(pattern)
def _glob_pattern(self, temp, inclusive=False, **state):
if inclusive:
for key in self._terminal_fields:
if key in state or key == 'root':
continue
elif key in self._field_values and len(self._field_values[key]) == 1:
continue
state[key] = '*'
with self._temporary_state:
pattern = self.get(temp, allow_asterisk=True, **state)
return pattern
def _find_files_with_target(self, action, temp, dst_root, inclusive, overwrite, confirm, state):
if dst_root is None:
if 'root' not in state:
raise TypeError("Need to specify at least one of root and dst_root")
dst_root = self.get('root')
src_filenames = self.glob(temp, inclusive, **state)
n = len(src_filenames)
if n == 0:
print("No files matching pattern.")
return None, None
root = self.get('root')
errors = [filename for filename in src_filenames if not filename.startswith(root)]
if errors:
raise ValueError(f"{len(errors)} files are not located in the root directory ({errors[0]}, ...)")
rel_filenames = {src: os.path.relpath(src, root) for src in src_filenames}
dst_filenames = {src: os.path.join(dst_root, filename) for src, filename in rel_filenames.items()}
if overwrite is not True:
exist = [src for src, dst in dst_filenames.items() if os.path.exists(dst)]
if exist:
if overwrite is None:
raise ValueError(f"{len(exist)} of {n} files already exist")
elif overwrite is False:
if len(exist) == n:
print(f"All {n} files already exist.")
return None, None
n -= len(exist)
for src in exist:
src_filenames.remove(src)
else:
raise TypeError(f"overwrite={overwrite!r}")
if not confirm:
print(f"{action} {self.get("root")} -> {dst_root}:")
for src in src_filenames:
print(" " + rel_filenames[src])
if input(f"{action} {n} files? (confirm with 'yes'): ") != 'yes':
return None, None
return src_filenames, [dst_filenames[src] for src in src_filenames]
def copy(self, temp, dst_root=None, inclusive=False, confirm=False, overwrite=None, **state):
"""Copy files to a different root folder
Parameters
----------
temp : str
Name of the path template for which to find files.
dst_root : str
Path to the root to which the files should be moved. If the target
is the experiment's root directory, specify ``root`` as the source
root and leave ``dst_root`` unspecified.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Skip asking for confirmation before copying the files.
overwrite : bool
``True`` to overwrite target files if they already exist. ``False``
            to quietly keep existing files.
See Also
--------
glob : Find all files matching a template.
move : Move files.
rm : Delete files.
make_copy : Copy a file by substituting a field
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
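        Examples
        --------
        A hypothetical call copying one subject's files to a backup root
        (template name and paths are made up):
        >>> e.copy('info-file', dst_root='/backup/study', subject='S01')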
"""
src_filenames, dst_filenames = self._find_files_with_target('Copy', temp, dst_root, inclusive, overwrite, confirm, state)
if not src_filenames:
return
for src, dst in tqdm(zip(src_filenames, dst_filenames), "Copying", len(src_filenames)):
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
def move(self, temp, dst_root=None, inclusive=False, confirm=False, overwrite=None, **state):
"""Move files to a different root folder
Parameters
----------
temp : str
Name of the path template for which to find files.
dst_root : str
Path to the root to which the files should be moved. If the target
is the experiment's root directory, specify ``root`` as the source
root and leave ``dst_root`` unspecified.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Skip asking for confirmation before moving the files.
overwrite : bool
Overwrite target files if they already exist.
See Also
--------
copy : Copy files.
glob : Find all files matching a template.
rm : Delete files.
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
"""
if overwrite is False:
raise ValueError(f"overwrite={overwrite!r}")
src_filenames, dst_filenames = self._find_files_with_target('Move', temp, dst_root, inclusive, overwrite, confirm, state)
if not src_filenames:
return
for src, dst in tqdm(zip(src_filenames, dst_filenames), "Moving", len(src_filenames)):
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(src, dst)
def show_file_status(self, temp, row, col=None, count=True, present='time',
absent='-', **kwargs):
"""Compile a table about the existence of files
Parameters
----------
temp : str
The name of the path template for the files to examine.
row : str
Field over which to alternate rows.
col : None | str
Field over which to alternate columns (default is a single column).
count : bool
Add a column with a number for each line (default True).
present : 'time' | 'date' | str
String to display when a given file is present. 'time' to use last
modification date and time (default); 'date' for date only.
absent : str
String to display when a given file is absent (default '-').
others :
``self.iter()`` kwargs.
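        Examples
        --------
        A hypothetical call (template and field names are made up):
        >>> print(e.show_file_status('raw-file', 'subject'))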
"""
if col is None:
col_v = (None,)
ncol = 1
else:
col_v = self.get_field_values(col)
ncol = len(col_v)
# table header
table = fmtxt.Table('r' * bool(count) + 'l' * (ncol + 1))
if count:
table.cell()
table.cell(row)
if col is None:
table.cell(temp)
else:
for name in col_v:
table.cell(name)
table.midrule()
# body
for i, row_v in enumerate(self.iter(row, **kwargs)):
if count:
table.cell(i)
table.cell(row_v)
for v in col_v:
if v is None:
path = self.get(temp)
else:
path = self.get(temp, **{col: v})
if os.path.exists(path):
if present == 'time':
r = strftime('%x %X', localtime(os.path.getmtime(path)))
elif present == 'date':
r = strftime('%x', localtime(os.path.getmtime(path)))
else:
r = present
else:
r = absent
table.cell(r)
return table
def show_file_status_mult(self, files, fields, count=True, present='X',
absent='-', **kwargs):
"""
Compile a table about the existence of multiple files
Parameters
----------
files : str | list of str
The names of the path templates whose existence to list.
fields : str | list of str
The names of the variables for which to list files (i.e., for each
unique combination of ``fields``, list ``files``).
count : bool
Add a column with a number for each subject.
present : str
String to display when a given file is present.
absent : str
String to display when a given file is absent.
Examples
--------
>>> e.show_file_status_mult(['raw-file', 'trans-file', 'fwd-file'],
... 'subject')
Subject Raw-file Trans-file Fwd-file
-----------------------------------------------
0 AD001 X X X
1 AD002 X X X
2 AD003 X X X
...
"""
if not isinstance(files, (list, tuple)):
files = [files]
if not isinstance(fields, (list, tuple)):
fields = [fields]
ncol = (len(fields) + len(files))
table = fmtxt.Table('r' * bool(count) + 'l' * ncol)
if count:
table.cell()
for name in fields + files:
table.cell(name.capitalize())
table.midrule()
for i, _ in enumerate(self.iter(fields, **kwargs)):
if count:
table.cell(i)
for field in fields:
table.cell(self.get(field))
for temp in files:
path = self.get(temp)
if os.path.exists(path):
table.cell(present)
else:
table.cell(absent)
return table
def show_in_finder(self, temp, **kwargs):
"Reveal the file corresponding to the ``temp`` template in the Finder."
fname = self.get(temp, **kwargs)
subprocess.call(["open", "-R", fname])
def rename(self, old, new, exclude=False):
"""Rename all files corresponding to a pattern (or template)
Parameters
----------
old : str
Template for the files to be renamed. Can interpret '*', but will
            raise an error in cases where more than one file fits the pattern.
new : str
Template for the new names.
Examples
--------
The following command will collect a specific file for each subject and
place it in a common folder:
>>> e.rename('info-file', '/some_other_place/{subject}_info.txt')
"""
new = self.expand_template(new)
files = []
for old_name in self.iter_temp(old, exclude):
if '*' in old_name:
matches = glob(old_name)
if len(matches) == 1:
old_name = matches[0]
elif len(matches) > 1:
err = ("Several files fit the pattern %r" % old_name)
raise ValueError(err)
if os.path.exists(old_name):
new_name = self.format(new)
files.append((old_name, new_name))
if not files:
print("No files found for %r" % old)
return
old_pf = os.path.commonprefix([pair[0] for pair in files])
new_pf = os.path.commonprefix([pair[1] for pair in files])
n_pf_old = len(old_pf)
n_pf_new = len(new_pf)
table = fmtxt.Table('lll')
table.cells('Old', '', 'New')
table.midrule()
table.caption("%s -> %s" % (old_pf, new_pf))
for old, new in files:
table.cells(old[n_pf_old:], '->', new[n_pf_new:])
print(table)
msg = "Rename %s files (confirm with 'yes')? " % len(files)
if input(msg) == 'yes':
for old, new in files:
dirname = os.path.dirname(new)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(old, new)
def rename_field(self, temp, field, old, new, exclude=False, **kwargs):
"""Change the value of one field in paths corresponding to a template
Parameters
----------
temp : str
Template name.
field : str
Field to change.
old : str
Old value.
new : str
New value.
kwargs :
``self.iter_temp`` arguments.
"""
items = [] # (tag, src, dst)
kwargs[field] = old
dst_kwa = {field: new}
        for src in self.iter_temp(temp, exclude, **kwargs):
dst = self.get(temp, **dst_kwa)
if os.path.exists(src):
if os.path.exists(dst):
tag = 'o'
else:
tag = ' '
else:
tag = 'm'
items.append((tag, src, dst))
src_prefix = os.path.commonprefix(tuple(item[1] for item in items))
dst_prefix = os.path.commonprefix(tuple(item[2] for item in items))
src_crop = len(src_prefix)
dst_crop = len(dst_prefix)
# print info
if src_prefix == dst_prefix:
lines = ['in ' + src_prefix, '']
else:
lines = [src_prefix, '->' + dst_prefix, '']
for tag, src, dst in items:
lines.append('%s %s -> %s' % (tag, src[src_crop:], dst[dst_crop:]))
lines.append('')
        msg = 'Legend m: source is missing; o: will overwrite a file'
lines.append(msg)
print('\n'.join(lines))
rename = tuple(item for item in items if item[0] == ' ')
if not rename:
return
msg = "Rename %i files (confirm with 'yes')? " % len(rename)
if input(msg) != 'yes':
return
for _, src, dst in rename:
os.rename(src, dst)
print("Done")
def rm(self, temp, inclusive=False, confirm=False, **constants):
"""Remove all files corresponding to a template
Asks for confirmation before deleting anything. Uses glob, so
individual templates can be set to '*'.
Parameters
----------
temp : str
Name of the path template for which to find and delete files.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Confirm removal of the selected files. If False (default) the user
is prompted for confirmation with a list of files; if True, the
files are removed immediately.
        others :
Set field values (values can be '*' to match all).
See Also
--------
glob : Find all files matching a template.
        copy : Copy files.
move : Move files.
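        Examples
        --------
        A hypothetical call (template name is made up; a confirmation prompt
        is shown unless ``confirm=True``):
        >>> e.rm('info-file', subject='S01')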
"""
files = self.glob(temp, inclusive, **constants)
secondary_files = []
for stemp in self._secondary_cache[temp]:
secondary_files.extend(self.glob(stemp, inclusive, **constants))
options = {'yes': 'delete files', 'no': "don't delete files (default)"}
if files or secondary_files:
print("root: %s\n" % self.get('root'))
print('\n'.join(self._remove_root(files)))
is_dir = [os.path.isdir(path) for path in files]
# Confirm deletion
if not confirm:
n_dirs = sum(is_dir)
n_files = len(files) - n_dirs
desc = []
if n_dirs:
desc.append(n_of(n_dirs, 'directory'))
if n_files:
desc.append(n_of(n_files, 'file'))
if secondary_files:
desc.append(n_of(len(secondary_files), 'secondary file'))
info = f"Delete {enumeration(desc)}?"
# Confirm if deleting files not in managed space
safe_root = self.get(self._safe_delete)
n_unsafe = len(files) - sum(path.startswith(safe_root) for path in files)
if n_unsafe:
info += f"\n!\n! {plural("item", n_unsafe)} outside of {self._safe_delete}\n!"
if ask(info, options, allow_empty=True) != 'yes':
print('aborting...')
return
print('deleting...')
dirs = (p for p, isdir in zip(files, is_dir) if isdir)
files = (p for p, isdir in zip(files, is_dir) if not isdir)
for path in dirs:
shutil.rmtree(path)
for path in chain(files, secondary_files):
os.remove(path)
else:
print("No files found for %r" % temp)
def _remove_root(self, paths):
root = self.get('root')
root_len = len(root)
return (path[root_len:] if path.startswith(root) else path
for path in paths)
| # Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from collections import defaultdict
import difflib
from functools import reduce
from glob import glob
from itertools import chain, product
import operator
import os
import re
import shutil
import subprocess
from time import localtime, strftime
import traceback
import numpy as np
from tqdm import tqdm
from .. import fmtxt
from .._config import CONFIG
from .._text import enumeration, n_of, plural
from .._utils import as_sequence, LazyProperty, ask
from .._utils.com import Notifier, NotNotifier
from .definitions import check_names, compound
def _etree_expand(node, state):
for tk, tv in node.items():
if tk == '.':
continue
for k, v in state.items():
name = '{%s}' % tk
if str(v).startswith(name):
tv[k] = {'.': v.replace(name, '')}
if len(tv) > 1:
_etree_expand(tv, state)
def _etree_node_repr(node, name, indent=0):
head = ' ' * indent
out = [(name, head + node['.'])]
for k, v in node.items():
if k == '.':
continue
out.extend(_etree_node_repr(v, k, indent=indent + 3))
return out
class LayeredDict(dict):
"""Dictionary which can store and restore states"""
def __init__(self):
self._states = []
dict.__init__(self)
def __repr__(self):
return ("<LayeredDict with %i stored states:\n"
"%r>" % (len(self._states), dict.__repr__(self)))
def get_stored(self, key, level, default=None):
"""Retrieve a field value from any level
Parameters
----------
key : str
the field name (dictionary key).
level : int
The level from which to retrieve the value. -1 = the current level.
"""
return self._states[level].get(key, default)
def restore_state(self, state=-1, discard_tip=True):
"""Restore a previously stored state
Parameters
----------
state : int | dict
Index of the state which to restore (specified as index into a
list of stored states, i.e., negative values access recently
stored states).
discard_tip : bool
Discard the relevant state after restoring it. All states stored
later are discarded either way.
See Also
--------
.get_stored(): Retrieve a stored value without losing stored states
"""
if isinstance(state, int):
index = state
state = self._states[index]
if discard_tip:
del self._states[index:]
elif index != -1: # -1 + 1 = 0
del self._states[index + 1:]
elif not isinstance(state, dict):
raise TypeError("state needs to be either int or dict, got %r" %
(state,))
self.clear()
self.update(state)
def store_state(self):
"Store the current state"
self._states.append(self.copy())
class _TempStateController:
def __init__(self, experiment):
self.experiment = experiment
def __enter__(self):
self.experiment._store_state()
def __exit__(self, exc_type, exc_value, traceback):
self.experiment._restore_state()
class TreeModel:
"""
A hierarchical collection of format strings and field values
Notes
-----
Any subclass should make sure to call the ``._store_state()`` method at the
end of initialization.
"""
owner = None # email address as string (for notification)
_auto_debug = False # in notification block
_fmt_pattern = re.compile(r'\{([\w-]+)\}')
# a dictionary of static templates (i.e., templates that do not have any hooks)
_templates = {}
defaults = {}
_repr_args = ()
def __init__(self, **state):
# scaffold for state
self._fields = LayeredDict()
self._field_values = LayeredDict()
self._terminal_fields = []
self._secondary_cache = defaultdict(tuple) # secondary cache-files
self._repr_kwargs = []
self._repr_kwargs_optional = []
# scaffold for hooks
self._compound_members = {}
self._compounds = defaultdict(list)
self._eval_handlers = defaultdict(list)
self._post_set_handlers = defaultdict(list)
self._set_handlers = {}
self._slave_fields = defaultdict(list)
self._slave_handlers = {}
# construct initial state: make all defaults available, then set as
# many values as we can
self._defaults = dict(self.defaults)
self._defaults.update(state)
for k, v in self._templates.items():
if v is None or isinstance(v, str):
self._register_constant(k, v)
elif isinstance(v, tuple):
self._register_field(k, v, v[0], allow_empty=True)
else:
raise TypeError(f"Invalid templates field value: {v!r}. Need None, tuple or string")
if self.owner:
task = self.__class__.__name__
self.notification = Notifier(self.owner, task, self._crash_report,
self._auto_debug)
else:
self.notification = NotNotifier()
def __repr__(self):
args = [f'{self._fields[arg]!r}' for arg in self._repr_args]
kwargs = [(arg, self._fields[arg]) for arg in self._repr_kwargs]
no_initial_state = len(self._fields._states) == 0
for k in self._repr_kwargs_optional:
v = self._fields[k]
if no_initial_state or v != self._fields.get_stored(k, level=0):
kwargs.append((k, v))
args.extend(f'{k}={v!r}' for k, v in kwargs)
return f"{self.__class__.__name__}({', '.join(args)})"
def _bind_eval(self, key, handler):
self._eval_handlers[key].append(handler)
def _bind_post_set(self, key, handler):
handlers = self._post_set_handlers[key]
if handler not in handlers:
handlers.append(handler)
def _bind_set(self, key, handler):
if key in self._set_handlers:
raise KeyError("set-handler for %r already set" % key)
self._set_handlers[key] = handler
def _crash_report(self):
out = []
# try:
# source = inspect.getsource(self.__class__)
# except Exception as e:
# source = "Failed to retrieve source:\n" + traceback.format_exc(e)
# out.append(source)
try:
tree = str(self.show_state())
except Exception as e:
tree = "Failed to retrieve state:\n" + traceback.format_exc(e)
out.append(tree)
# package versions
from .. import __version__
import mne
import scipy
out.append('\n'.join(("Eelbrain %s" % __version__,
"mne-python %s" % mne.__version__,
"SciPy %s" % scipy.__version__,
"NumPy %s" % np.__version__)))
return out
def _find_missing_fields(self):
"""Check that all field names occurring in templates are valid entries
Raises
------
KeyError
If any field names occurring in templates are not registered fields.
"""
# find field names occurring in field values but not as fields
missing = set()
for temp in self._fields.values():
for field in self._fmt_pattern.findall(temp):
if field not in self._fields:
missing.add(field)
if missing:
raise KeyError("The following fields occur in templates but "
"are undefined: %s" % ', '.join(sorted(missing)))
def _register_compound(self, key, elements):
"""Register a field that is composed out of other fields
The compound always reflects ``' '.join(elements)`` including only
elements that are not empty.
Parameters
----------
key : str
The name of the compound field.
elements : tuple of str
The field names of the elements.
"""
self._compound_members[key] = elements
for e in elements:
self._compounds[e].append(key)
self._bind_post_set(e, self._update_compounds)
self._fields[key] = None
self._update_compound(key)
def _register_constant(self, key, value):
value = self._defaults.get(key, value)
if value is None:
raise ValueError("The %r field needs to be set as default" % key)
self._fields[key] = value
def _register_field(self, key, values=None, default=None, set_handler=None,
eval_handler=None, post_set_handler=None,
depends_on=None, slave_handler=None,
allow_empty=False, repr=None):
"""Register an iterable field
Parameters
----------
key : str
Name of the field.
values : None | sequence of str
Possible values for this field, if known.
default : None | str
Set the default value (if None, the first element in values).
set_handler : None | callable
Function to call instead of updating the state value. The return
value of the set_handler is sent to the post_set_handler.
eval_handler : None | callable
Function to use for evaluating a value before setting. Can be
called without actually setting the value; any parameter changes
need to be evaluated in post_set_handlers.
post_set_handler : None | callable
Function to call after the value is changed. Needs to be able to
handle non-existing values for ``e.set(..., vmatch=False)`` calls.
depends_on : str | sequence of str
Slave fields: Fields in depends_on trigger change in ``key``.
slave_handler : func
Slave fields: Function that determines the new value of ``key``.
allow_empty : bool
Allow empty string in ``values``.
repr : bool
By default, fields are shown in ``repr`` if they are different from
the value at initialization. Set to ``True`` to always show them
(as long as there are at least 2 ``values``).
"""
if key in self._fields:
raise KeyError("Field already exists: %r" % key)
if depends_on is not None:
if (set_handler is not None or eval_handler is not None or
post_set_handler is not None):
raise RuntimeError("Slave values can't have other handlers")
elif slave_handler is None:
raise RuntimeError("Slave value requires slave_handler")
self._register_slave_field(key, depends_on, slave_handler)
if default is None:
default = slave_handler(self._fields)
if set_handler is not None:
self._bind_set(key, set_handler)
if eval_handler is not None:
self._bind_eval(key, eval_handler)
if post_set_handler is not None:
self._bind_post_set(key, post_set_handler)
default = self._defaults.get(key, default)
if values:
values = tuple(values)
check_names(values, key, allow_empty)
if default is None:
default = values[0]
elif default not in values:
raise ValueError(f"Default {default!r} for {key!r} not in values {values}")
self._field_values[key] = values
# repr
if key in self._repr_args:
pass
elif repr is True:
if values and len(values) > 1:
self._repr_kwargs.append(key)
elif repr is None:
if values and len(values) > 1:
self._repr_kwargs_optional.append(key)
elif repr is not False:
raise TypeError(f"repr={repr!r}")
self._terminal_fields.append(key)
self._fields[key] = ''
if default is not None:
self.set(**{key: default})
def _register_slave_field(self, key, depends_on, handler):
"""Register a field that strictly depends on one or more other fields
Parameters
----------
key : str
Field name.
depends_on : str | sequence of str
Fields that trigger change.
handler : func
Function that determines the new value.
Notes
-----
Restrictions:
- Slave fields can not have any other handlers
- Slave fields can not depend on other slave fields
"""
if isinstance(depends_on, str):
depends_on = (depends_on,)
for dep in depends_on:
self._slave_fields[dep].append(key)
self._slave_handlers[key] = handler
self._fields[key] = handler(self._fields)
def expand_template(self, temp, keep=()):
"""Expand all constant variables in a template
Parameters
----------
temp : str
Template or name of the template which should be expanded.
keep : container (implements __contains__)
Names of the variables which should not be expanded.
Returns
-------
formatted_temp : str
Template with all variables replaced by their values, except
variables which have entries in field_values or in ``keep``.
"""
temp = self._fields.get(temp, temp)
while True:
stop = True
for name in self._fmt_pattern.findall(temp):
if (name in keep) or (self._field_values.get(name, False)):
pass
else:
temp = temp.replace('{%s}' % name, self._fields[name])
stop = False
if stop:
break
return temp
def find_keys(self, temp, root=True):
"""Find all terminal field names that are relevant for a template.
Parameters
----------
temp : str
Template (or field name) for which to find terminal field names.
root : bool
Include "root" if present (default True).
Returns
-------
keys : list
All terminal field names that are relevant for formatting ``temp``.
"""
if temp in self._terminal_fields:
return [temp]
if temp in self._compound_members:
temporary_keys = list(self._compound_members[temp])
else:
temp = self._fields.get(temp, temp)
temporary_keys = self._fmt_pattern.findall(temp)
keys = []
while temporary_keys:
key = temporary_keys.pop(0)
if key == 'root':
if root:
keys.append('root')
elif key in self._terminal_fields:
keys.append(key)
else:
keys.extend(self.find_keys(key, root))
# remove duplicates
return list(dict.fromkeys(keys))
def format(self, string, vmatch=True, **kwargs):
"""Format a string (i.e., replace any '{xxx}' fields with their values)
Parameters
----------
string : str
Template string.
vmatch : bool
For fields with known names, only allow existing field names.
others :
State parameters.
Returns
-------
formatted_string : str
The template temp formatted with current state values.
"""
self.set(match=vmatch, **kwargs)
while self._fmt_pattern.search(string):
string = string.format(**self._fields)
return string
def get(self, temp, **state):
return self.format('{%s}' % temp, **state)
def _get_rel(self, temp, start):
"Get the path of ``temp`` relative to ``start`` (both field names)"
abs_ = self.get(temp)
start_ = self.get(start)
return os.path.relpath(abs_, start_)
def get_field_values(self, field, exclude=()):
"""Find values for a field taking into account exclusion
Parameters
----------
field : str
Field for which to find values.
exclude : list of str
Exclude these values.
"""
values = self._field_values[field]
if isinstance(exclude, str):
exclude = (exclude,)
if exclude:
values = [v for v in values if v not in exclude]
else:
values = list(values)
return values
def iter(self, fields, exclude=None, values=None, progress_bar=None, **constants):
"""
Cycle the experiment's state through all values on the given fields
Parameters
----------
fields : sequence | str
Field(s) over which should be iterated.
exclude : dict {str: iterator over str}
Exclude values from iteration (``{field: values_to_exclude}``).
values : dict {str: iterator over str}
Fields with custom values to iterate over (instead of the
corresponding field values) with {name: (sequence of values)}
entries.
progress_bar : str
Message to show in the progress bar.
...
Fields with constant values throughout the iteration.
"""
if isinstance(fields, str):
fields = (fields,)
yield_str = True
else:
yield_str = False
# find actual fields to iterate over:
iter_fields = []
for field in fields:
if field in constants:
continue
iter_fields.extend(f for f in self.find_keys(field) if f not in constants)
# check values and exclude
if values:
bad = set(values).difference(iter_fields)
if bad:
raise ValueError(f"values={values!r}: keys that are not iterated over ({', '.join(bad)})")
else:
values = {}
if exclude:
bad = set(exclude).difference(iter_fields)
if bad:
raise ValueError(f"exclude={exclude!r}: keys that are not iterated over ({', '.join(bad)})")
else:
exclude = {}
# set constants (before .get_field_values() call)
self.set(**constants)
# gather values to iterate over
v_lists = []
for field in iter_fields:
if field in values:
v_lists.append(as_sequence(values[field]))
else:
exclude_ = exclude.get(field, None)
v_lists.append(self.get_field_values(field, exclude_))
if len(v_lists):
n = reduce(operator.mul, map(len, v_lists))
with self._temporary_state:
disable = progress_bar is None or CONFIG['tqdm']
for v_list in tqdm(product(*v_lists), progress_bar, n, disable=disable):
self._restore_state(discard_tip=False)
self.set(**dict(zip(iter_fields, v_list)))
if yield_str:
yield self.get(fields[0])
else:
yield tuple(self.get(f) for f in fields)
else:
yield ()
def iter_temp(self, temp, exclude=None, values={}, **constants):
"""
Iterate through all paths conforming to a template given in ``temp``.
Parameters
----------
temp : str
Name of a template in the MneExperiment.templates dictionary, or
a path template with variables indicated as in ``'{var_name}'``
"""
# if the name is an existing template, retrieve it
temp = self.expand_template(temp, values.keys())
# find variables for iteration
variables = set(self._fmt_pattern.findall(temp))
variables.difference_update(constants)
for _ in self.iter(variables, exclude, values, **constants):
path = temp.format(**self._fields)
yield path
def _partial(self, temp, skip=()):
"Format a template while leaving some slots unfilled"
skip = set(skip)
fields = self._fields.copy()
fields.update({k: '{%s}' % k for k in skip})
string = '{%s}' % temp
while set(self._fmt_pattern.findall(string)).difference(skip):
string = string.format(**fields)
return string
def _copy_state(self):
"""Copy of the state that can be used with ``._restore_state()``"""
return self._fields.copy(), self._field_values.copy()
def _restore_state(self, state=-1, discard_tip=True):
"""Restore a previously stored state
Parameters
----------
state : int
Index of the state which to restore (specified as index into a
list of stored states, i.e., negative values access recently
stored states).
discard_tip : bool
Discard the relevant state after restoring it. All states stored
later are discarded either way.
"""
if isinstance(state, int):
s1 = s2 = state
else:
s1, s2 = state
self._fields.restore_state(s1, discard_tip)
self._field_values.restore_state(s2, discard_tip)
def reset(self):
"""Reset all field values to the state at initialization
This function can be used in cases where the same MneExperiment instance
is used to perform multiple independent operations, where parameters set
during one operation should not affect the next operation.
"""
self._restore_state(0, False)
def set(self, match=True, allow_asterisk=False, **state):
"""Set the value of one or more fields.
Parameters
----------
match : bool
For fields with pre-defined values, only allow valid values (default
``True``).
allow_asterisk : bool
If a value contains ``'*'``, set the value without the normal value
evaluation and checking mechanisms (default ``False``).
... :
Fields and values to set. Invalid fields raise a KeyError. Unless
match == False, Invalid values raise a ValueError.
"""
if not state:
return
# expand compounds
if state.pop('expand_compounds', True):
for k in list(state):
if k in self._compound_members:
fields = self._compound_members[k]
v = state.pop(k)
values = v.split(' ')
for i, field in enumerate(fields):
field_values = self._field_values[field]
vi = values[i] if len(values) > i else None
if vi in field_values:
continue
elif '' in field_values:
values.insert(i, '')
else:
raise ValueError(f"{k}={v!r}")
if len(values) != len(fields):
raise ValueError(f"{k}={v!r}")
state.update(zip(fields, values))
handled_state = {} # fields with special set handlers
for k in list(state):
v = state[k]
if k not in self._fields:
raise TypeError(f"{k}={v!r}: No template named {k!r}")
elif v is None:
state.pop(k)
continue
elif k in self._set_handlers:
handled_state[k] = self._set_handlers[k](state.pop(k))
continue
elif not isinstance(v, str):
raise TypeError(f"{k}={v!r}: Values have to be strings")
elif '*' in v and allow_asterisk:
continue
# eval values
eval_handlers = self._eval_handlers[k]
if eval_handlers:
for handler in eval_handlers:
try:
v = handler(v)
except ValueError:
if match:
raise
if not isinstance(v, str):
raise RuntimeError(f"Invalid conversion from handler {handler}: {k}={v!r}")
state[k] = v
elif match and k in self._field_values and v not in self._field_values[k]:
matches = difflib.get_close_matches(v, self._field_values[k], 1)
if matches:
alt = f"Did you mean {matches[0]!r}? "
else:
alt = ''
raise ValueError(f"{k}={v!r}. {alt}To see all valid values use e.show_fields(); To set a non-existent value, use e.set({k}={v!r}, match=False).")
self._fields.update(state)
# fields depending on changes in other fields
slave_state = {}
for state_key in set(state).union(handled_state).intersection(self._slave_fields):
for slave_key in self._slave_fields[state_key]:
if slave_key not in slave_state:
v = self._slave_handlers[slave_key](self._fields)
if v is not None:
slave_state[slave_key] = v
self._fields.update(slave_state)
# call post_set handlers
for k, v in chain(state.items(), handled_state.items(), slave_state.items()):
for handler in self._post_set_handlers[k]:
handler(k, v)
def show_fields(self, str_out=False):
"""
Generate a table for all iterable fields and their values.
Parameters
----------
str_out : bool
Return the table as a string (instead of printing it).
"""
lines = []
for key in self._field_values:
values = list(self._field_values[key])
line = f'{key}:'
head_len = len(line) + 1
while values:
v = repr(values.pop(0))
if values:
v += ','
if len(v) < 80 - head_len:
line += ' ' + v
else:
lines.append(line)
line = ' ' * head_len + v
if not values:
lines.append(line)
table = '\n'.join(lines)
if str_out:
return table
else:
print(table)
def show_state(self, temp=None, empty=False, hide=()):
"""List all top-level fields and their values
(Top-level fields are fields whose values do not contain templates)
Parameters
----------
temp : None | str
Only show variables relevant to this template.
empty : bool
Show empty variables (items whose value is the empty string '').
hide : collection of str
State variables to hide.
Returns
-------
state : Table
Table of (relevant) variables and their values.
"""
table = fmtxt.Table('lll')
table.cells('Key', '*', 'Value')
table.caption('*: Value is modified from initialization state.')
table.midrule()
if temp is None:
keys = chain(self._repr_kwargs, self._repr_kwargs_optional)
else:
keys = self.find_keys(temp)
for k in sorted(keys):
if k in hide:
continue
v = self._fields[k]
if v != self._fields.get_stored(k, level=0):
mod = '*'
else:
mod = ''
if empty or mod or v:
table.cells(k, mod, repr(v))
return table
def show_tree(self, root='root', fields=None):
"""
Print a tree of the file hierarchy implicit in the templates
Parameters
----------
root : str
Name of the root template (e.g., 'besa-root').
fields : list of str
Which fields to include in the tree (default is all).
"""
if fields is None:
fields = self._fields
else:
# find all implied fields
new_fields = set(fields)
fields = {}
while new_fields:
k = new_fields.pop()
fields[k] = v = self._fields[k]
new_fields.update([f for f in self._fmt_pattern.findall(v) if f not in fields])
tree = {'.': self.get(root)}
root_temp = '{%s}' % root
for k, v in fields.items():
if str(v).startswith(root_temp):
tree[k] = {'.': v.replace(root_temp, '')}
_etree_expand(tree, fields)
nodes = _etree_node_repr(tree, root)
name_len = max(len(n) for n, _ in nodes)
path_len = max(len(p) for _, p in nodes)
pad = ' ' * (80 - name_len - path_len)
print('\n'.join(n.ljust(name_len) + pad + p.ljust(path_len) for n, p in nodes))
def _store_state(self):
"""Store the current state
See also
--------
._restore_state() : restore a previously stored state
"""
self._fields.store_state()
self._field_values.store_state()
@LazyProperty
def _temporary_state(self):
return _TempStateController(self)
def _update_compound(self, key):
items = [self.get(k) for k in self._compound_members[key]]
self.set(**{key: compound(items)}, expand_compounds=False)
def _update_compounds(self, key, _):
for compound in self._compounds[key]:
self._update_compound(compound)
class FileTree(TreeModel):
""":class:`TreeModel` subclass for a file system hierarchy"""
_repr_args = ('root',)
_safe_delete = 'root' # directory from which to rm without warning
def __init__(self, **state):
TreeModel.__init__(self, **state)
self._make_handlers = {}
self._cache_handlers = {}
self._register_field('root', eval_handler=self._eval_root)
def _bind_cache(self, key, handler):
"""Bind a cache function to a ``*-file`` key
The cache function is called every time the file name is retrieved and
should recreate the file if it is outdated.
The cache function can return the filename of the created file since
it is called every time the specific file is requested. Note that this
causes problems for ``glob()``.
"""
if key in self._cache_handlers:
raise RuntimeError(f"Cache handler for {key!r} already defined")
elif key in self._make_handlers:
raise RuntimeError(f"Already defined make handler for {key!r}")
self._cache_handlers[key] = handler
def _bind_make(self, key, handler):
"""Bind a make function to a ``*-file`` key
The make function is called only when the file name is retrieved and
the file does not exist.
"""
if key in self._cache_handlers:
raise RuntimeError(f"Already defined cache handler for {key!r}")
elif key in self._make_handlers:
raise RuntimeError(f"Make handler for {key!r} already defined")
self._make_handlers[key] = handler
@staticmethod
def _eval_root(root):
root = os.path.expanduser(root)
if root != '':
root = os.path.normpath(os.path.abspath(root))
return root
def get(self, temp, fmatch=False, vmatch=True, match=True, mkdir=False,
make=False, **kwargs):
"""
Retrieve a formatted template
With ``fmatch=True``, any ``'*'`` in the path is expanded to match a
file, and an error is raised if the match is not unique. With
``mkdir=True``, the directory containing the file is created if it does
not exist.
Parameters
----------
temp : str
Name of the requested template.
fmatch : bool
"File-match": If the template contains asterisk ('*'), use glob to
fill it in. An IOError is raised if the pattern does not match
exactly one file.
vmatch : bool
"Value match": Require existence of the assigned value (only
applies for fields with stored values).
match : bool
Do any matching (i.e., match=False sets fmatch as well as vmatch
to False).
mkdir : bool
If the directory containing the file does not exist, create it.
make : bool
If the requested file does not exist, make it if possible.
kwargs :
Set any state values.
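Examples
--------
Retrieve a path, creating the containing directory if needed (template
and field names are hypothetical):
>>> path = e.get('report-file', subject='R0001', mkdir=True)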
"""
if not match:
fmatch = vmatch = False
path = TreeModel.get(self, temp, vmatch=vmatch, **kwargs)
path = os.path.expanduser(path)
# assert the presence of the file
if fmatch and ('*' in path):
paths = glob(path)
if len(paths) == 0 and make and temp in self._make_handlers:
self._make_handlers[temp]()
paths = glob(path)
if len(paths) == 1:
path = paths[0]
elif len(paths) > 1:
raise IOError(f"More than one files match {path!r}: {paths}")
else:
raise IOError(f"No file found for {path!r}")
# create the directory
if mkdir:
if temp.endswith('dir'):
dirname = path
else:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
root = self.get('root')
if root == '':
raise IOError("Prevented from creating directories because root is not set")
elif os.path.exists(root):
os.makedirs(dirname)
else:
raise IOError(f"Prevented from creating directories because root does not exist: {root!r}")
# make the file
if make:
if temp in self._cache_handlers:
path = self._cache_handlers[temp]() or path
elif not os.path.exists(path):
if temp in self._make_handlers:
with self._temporary_state:
self._make_handlers[temp]()
elif temp.endswith('-dir'):
os.makedirs(path)
else:
raise RuntimeError(f"No make handler for {temp!r}")
return path
def glob(self, temp, inclusive=False, **state):
"""Find all files matching a certain pattern
Parameters
----------
temp : str
Name of the path template for which to find files.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
See Also
--------
copy : Copy files.
move : Move files.
rm : Delete files.
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
Uses :func:`glob.glob`.
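Examples
--------
Find matching files for all subjects (template and field names are
hypothetical):
>>> e.glob('raw-file', subject='*')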
"""
pattern = self._glob_pattern(temp, inclusive, **state)
return glob(pattern)
def _glob_pattern(self, temp, inclusive=False, **state):
if inclusive:
for key in self._terminal_fields:
if key in state or key == 'root':
continue
elif key in self._field_values and len(self._field_values[key]) == 1:
continue
state[key] = '*'
with self._temporary_state:
pattern = self.get(temp, allow_asterisk=True, **state)
return pattern
def _find_files_with_target(self, action, temp, dst_root, inclusive, overwrite, confirm, state):
if dst_root is None:
if 'root' not in state:
raise TypeError("Need to specify at least one of root and dst_root")
dst_root = self.get('root')
src_filenames = self.glob(temp, inclusive, **state)
n = len(src_filenames)
if n == 0:
print("No files matching pattern.")
return None, None
root = self.get('root')
errors = [filename for filename in src_filenames if not filename.startswith(root)]
if errors:
raise ValueError(f"{len(errors)} files are not located in the root directory ({errors[0]}, ...)")
rel_filenames = {src: os.path.relpath(src, root) for src in src_filenames}
dst_filenames = {src: os.path.join(dst_root, filename) for src, filename in rel_filenames.items()}
if overwrite is not True:
exist = [src for src, dst in dst_filenames.items() if os.path.exists(dst)]
if exist:
if overwrite is None:
raise ValueError(f"{len(exist)} of {n} files already exist")
elif overwrite is False:
if len(exist) == n:
print(f"All {n} files already exist.")
return None, None
n -= len(exist)
for src in exist:
src_filenames.remove(src)
else:
raise TypeError(f"overwrite={overwrite!r}")
if not confirm:
print(f"{action} {self.get('root')} -> {dst_root}:")
for src in src_filenames:
print(" " + rel_filenames[src])
if input(f"{action} {n} files? (confirm with 'yes'): ") != 'yes':
return None, None
return src_filenames, [dst_filenames[src] for src in src_filenames]
def copy(self, temp, dst_root=None, inclusive=False, confirm=False, overwrite=None, **state):
"""Copy files to a different root folder
Parameters
----------
temp : str
Name of the path template for which to find files.
dst_root : str
Path to the root to which the files should be moved. If the target
is the experiment's root directory, specify ``root`` as the source
root and leave ``dst_root`` unspecified.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Skip asking for confirmation before copying the files.
overwrite : bool
``True`` to overwrite target files if they already exist. ``False``
to quietly keep existing files.
See Also
--------
glob : Find all files matching a template.
move : Move files.
rm : Delete files.
make_copy : Copy a file by substituting a field
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
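Examples
--------
Copy all files for a template to a backup location (template name and
path are hypothetical):
>>> e.copy('raw-file', dst_root='/backup/experiment', inclusive=True)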
"""
src_filenames, dst_filenames = self._find_files_with_target('Copy', temp, dst_root, inclusive, overwrite, confirm, state)
if not src_filenames:
return
for src, dst in tqdm(zip(src_filenames, dst_filenames), "Copying", len(src_filenames)):
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
def move(self, temp, dst_root=None, inclusive=False, confirm=False, overwrite=None, **state):
"""Move files to a different root folder
Parameters
----------
temp : str
Name of the path template for which to find files.
dst_root : str
Path to the root to which the files should be moved. If the target
is the experiment's root directory, specify ``root`` as the source
root and leave ``dst_root`` unspecified.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Skip asking for confirmation before moving the files.
overwrite : bool
Overwrite target files if they already exist.
See Also
--------
copy : Copy files.
glob : Find all files matching a template.
rm : Delete files.
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
"""
if overwrite is False:
raise ValueError(f"overwrite={overwrite!r}")
src_filenames, dst_filenames = self._find_files_with_target('Move', temp, dst_root, inclusive, overwrite, confirm, state)
if not src_filenames:
return
for src, dst in tqdm(zip(src_filenames, dst_filenames), "Moving", len(src_filenames)):
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(src, dst)
def show_file_status(self, temp, row, col=None, count=True, present='time',
absent='-', **kwargs):
"""Compile a table about the existence of files
Parameters
----------
temp : str
The name of the path template for the files to examine.
row : str
Field over which to alternate rows.
col : None | str
Field over which to alternate columns (default is a single column).
count : bool
Add a column with a number for each line (default True).
present : 'time' | 'date' | str
String to display when a given file is present. 'time' to use last
modification date and time (default); 'date' for date only.
absent : str
String to display when a given file is absent (default '-').
others :
``self.iter()`` kwargs.
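Examples
--------
One row per subject, one column per session (field names are
hypothetical):
>>> e.show_file_status('raw-file', 'subject', 'session')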
"""
if col is None:
col_v = (None,)
ncol = 1
else:
col_v = self.get_field_values(col)
ncol = len(col_v)
# table header
table = fmtxt.Table('r' * bool(count) + 'l' * (ncol + 1))
if count:
table.cell()
table.cell(row)
if col is None:
table.cell(temp)
else:
for name in col_v:
table.cell(name)
table.midrule()
# body
for i, row_v in enumerate(self.iter(row, **kwargs)):
if count:
table.cell(i)
table.cell(row_v)
for v in col_v:
if v is None:
path = self.get(temp)
else:
path = self.get(temp, **{col: v})
if os.path.exists(path):
if present == 'time':
r = strftime('%x %X', localtime(os.path.getmtime(path)))
elif present == 'date':
r = strftime('%x', localtime(os.path.getmtime(path)))
else:
r = present
else:
r = absent
table.cell(r)
return table
def show_file_status_mult(self, files, fields, count=True, present='X',
absent='-', **kwargs):
"""
Compile a table about the existence of multiple files
Parameters
----------
files : str | list of str
The names of the path templates whose existence to list.
fields : str | list of str
The names of the variables for which to list files (i.e., for each
unique combination of ``fields``, list ``files``).
count : bool
Add a column with a number for each subject.
present : str
String to display when a given file is present.
absent : str
String to display when a given file is absent.
Examples
--------
>>> e.show_file_status_mult(['raw-file', 'trans-file', 'fwd-file'],
... 'subject')
Subject Raw-file Trans-file Fwd-file
-----------------------------------------------
0 AD001 X X X
1 AD002 X X X
2 AD003 X X X
...
"""
if not isinstance(files, (list, tuple)):
files = [files]
if not isinstance(fields, (list, tuple)):
fields = [fields]
ncol = (len(fields) + len(files))
table = fmtxt.Table('r' * bool(count) + 'l' * ncol)
if count:
table.cell()
for name in fields + files:
table.cell(name.capitalize())
table.midrule()
for i, _ in enumerate(self.iter(fields, **kwargs)):
if count:
table.cell(i)
for field in fields:
table.cell(self.get(field))
for temp in files:
path = self.get(temp)
if os.path.exists(path):
table.cell(present)
else:
table.cell(absent)
return table
def show_in_finder(self, temp, **kwargs):
"Reveal the file corresponding to the ``temp`` template in the Finder."
fname = self.get(temp, **kwargs)
subprocess.call(["open", "-R", fname])
def rename(self, old, new, exclude=False):
"""Rename all files corresponding to a pattern (or template)
Parameters
----------
old : str
Template for the files to be renamed. Can interpret '*', but will
raise an error in cases where more than one file fit the pattern.
new : str
Template for the new names.
Examples
--------
The following command will collect a specific file for each subject and
place it in a common folder:
>>> e.rename('info-file', '/some_other_place/{subject}_info.txt')
"""
new = self.expand_template(new)
files = []
for old_name in self.iter_temp(old, exclude):
if '*' in old_name:
matches = glob(old_name)
if len(matches) == 1:
old_name = matches[0]
elif len(matches) > 1:
err = ("Several files fit the pattern %r" % old_name)
raise ValueError(err)
if os.path.exists(old_name):
new_name = self.format(new)
files.append((old_name, new_name))
if not files:
print("No files found for %r" % old)
return
old_pf = os.path.commonprefix([pair[0] for pair in files])
new_pf = os.path.commonprefix([pair[1] for pair in files])
n_pf_old = len(old_pf)
n_pf_new = len(new_pf)
table = fmtxt.Table('lll')
table.cells('Old', '', 'New')
table.midrule()
table.caption("%s -> %s" % (old_pf, new_pf))
for old, new in files:
table.cells(old[n_pf_old:], '->', new[n_pf_new:])
print(table)
msg = "Rename %s files (confirm with 'yes')? " % len(files)
if input(msg) == 'yes':
for old, new in files:
dirname = os.path.dirname(new)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(old, new)
def rename_field(self, temp, field, old, new, exclude=False, **kwargs):
"""Change the value of one field in paths corresponding to a template
Parameters
----------
temp : str
Template name.
field : str
Field to change.
old : str
Old value.
new : str
New value.
kwargs :
``self.iter_temp`` arguments.
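Examples
--------
Rename the ``session`` field in all paths matching a template from
``'s1'`` to ``'session1'`` (template and field names are hypothetical):
>>> e.rename_field('raw-file', 'session', 's1', 'session1')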
"""
items = [] # (tag, src, dst)
kwargs[field] = old
dst_kwa = {field: new}
for src in self.iter_temp(temp, exclude, **kwargs):
dst = self.get(temp, **dst_kwa)
if os.path.exists(src):
if os.path.exists(dst):
tag = 'o'
else:
tag = ' '
else:
tag = 'm'
items.append((tag, src, dst))
src_prefix = os.path.commonprefix(tuple(item[1] for item in items))
dst_prefix = os.path.commonprefix(tuple(item[2] for item in items))
src_crop = len(src_prefix)
dst_crop = len(dst_prefix)
# print info
if src_prefix == dst_prefix:
lines = ['in ' + src_prefix, '']
else:
lines = [src_prefix, '->' + dst_prefix, '']
for tag, src, dst in items:
lines.append('%s %s -> %s' % (tag, src[src_crop:], dst[dst_crop:]))
lines.append('')
msg = 'Legend  m: source is missing;  o: will overwrite a file'
lines.append(msg)
print('\n'.join(lines))
rename = tuple(item for item in items if item[0] == ' ')
if not rename:
return
msg = "Rename %i files (confirm with 'yes')? " % len(rename)
if input(msg) != 'yes':
return
for _, src, dst in rename:
os.rename(src, dst)
print("Done")
def rm(self, temp, inclusive=False, confirm=False, **constants):
"""Remove all files corresponding to a template
Asks for confirmation before deleting anything. Uses glob, so
individual templates can be set to '*'.
Parameters
----------
temp : str
Name of the path template for which to find and delete files.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Confirm removal of the selected files. If False (default) the user
is prompted for confirmation with a list of files; if True, the
files are removed immediately.
**others** :
Set field values (values can be '*' to match all).
See Also
--------
glob : Find all files matching a template.
copy : Copy files
move : Move files.
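Examples
--------
Delete the files of a single subject after a confirmation prompt
(template and field names are hypothetical):
>>> e.rm('raw-file', subject='R0001')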
"""
files = self.glob(temp, inclusive, **constants)
secondary_files = []
for stemp in self._secondary_cache[temp]:
secondary_files.extend(self.glob(stemp, inclusive, **constants))
options = {'yes': 'delete files', 'no': "don't delete files (default)"}
if files or secondary_files:
print("root: %s\n" % self.get('root'))
print('\n'.join(self._remove_root(files)))
is_dir = [os.path.isdir(path) for path in files]
# Confirm deletion
if not confirm:
n_dirs = sum(is_dir)
n_files = len(files) - n_dirs
desc = []
if n_dirs:
desc.append(n_of(n_dirs, 'directory'))
if n_files:
desc.append(n_of(n_files, 'file'))
if secondary_files:
desc.append(n_of(len(secondary_files), 'secondary file'))
info = f"Delete {enumeration(desc)}?"
# Confirm if deleting files not in managed space
safe_root = self.get(self._safe_delete)
n_unsafe = len(files) - sum(path.startswith(safe_root) for path in files)
if n_unsafe:
info += f"\n!\n! {plural('item', n_unsafe)} outside of {self._safe_delete}\n!"
if ask(info, options, allow_empty=True) != 'yes':
print('aborting...')
return
print('deleting...')
dirs = (p for p, isdir in zip(files, is_dir) if isdir)
files = (p for p, isdir in zip(files, is_dir) if not isdir)
for path in dirs:
shutil.rmtree(path)
for path in chain(files, secondary_files):
os.remove(path)
else:
print("No files found for %r" % temp)
def _remove_root(self, paths):
root = self.get('root')
root_len = len(root)
return (path[root_len:] if path.startswith(root) else path
for path in paths)
|
import sys
from CodeAnalysis.Syntax.syntaxkind import SyntaxKind
import colorama, termcolor
colorama.init()
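# Example usage (illustrative sketch; `lex` is a hypothetical helper,
# since this project's lexer is not shown here):
#     tokens = lex(source_code)
#     parser = Parser(tokens, debug=True)
#     parser.program()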
class Parser:
def __init__(self, token_list, debug=False):
self.token_list = token_list
self.position = -1
self.debug = debug
# ----
self.symbols = set()
self.labels_declared = set()
self.labels_gotoed = set()
# ----
self.cur_token = None
self.advance()
def check_token(self, kind):
"""
Checks whether passed token kind matches the current token kind.
"""
return kind == self.cur_token.kind
def peek(self, kind):
"""
Checks whether the passed token kind matches the next token's kind.
"""
return kind == self.token_list[self.position + 1].kind
def match(self, kind):
"""
Matches the current token kind to the passed token kind.
Exceptions: Raises an error if the token kind doesn't match.
"""
if not self.check_token(kind):
self.abort("Expected " + kind.name + ", got " + self.cur_token.kind.name)
self.advance()
def advance(self, offset=0):
"""
Advances to the next token, optionally skipping ``offset`` additional tokens.
"""
self.position += 1 + offset
if self.position < len(self.token_list):
self.cur_token = self.token_list[self.position]
def is_comparison_operator(self):
"""
Checks whether the current token is a comparison operator.
"""
return (
self.check_token(SyntaxKind.GreaterToken) or self.check_token(SyntaxKind.GreaterOrEqualsToken) or
self.check_token(SyntaxKind.LessToken) or self.check_token(SyntaxKind.LessOrEqualsToken) or
self.check_token(SyntaxKind.EqualsEqualsToken) or self.check_token(SyntaxKind.BangEqualsToken)
)
def abort(self, message):
"""
Aborts the program with the passed message.
"""
print(termcolor.colored(f"Error: {message}", "red"))
sys.exit()
# Production rules.
def program(self):
"""
Program entry point
"""
if self.debug:
print("Program")
while not self.check_token(SyntaxKind.EndOfFileToken):
self.statement()
for label in self.labels_gotoed:
if label not in self.labels_declared:
self.abort("Attempting to goto to undeclared label: " + label)
def statement(self):
"""
Statements Parser
"""
if self.check_token(SyntaxKind.PrintKeyword):
"""
Print Statement
Syntax:
1. print(StringKeyword)
2. print(EXPRESSION)
"""
if self.debug:
print("Print-Statement")
self.advance()
# Body
# ----
self.match(SyntaxKind.OpenParenthesisToken)
if self.check_token(SyntaxKind.StringToken):
self.advance()
else:
self.expression()
self.match(SyntaxKind.CloseParenthesisToken)
elif self.check_token(SyntaxKind.IfKeyword):
"""
If/If-else Statement
Syntax:
1. if(EXPRESSION)
{
STATEMENTS
}
2. if (COMPARISON)
{
STATEMENTS
} else {
STATEMENTS
}
3. if (COMPARISON)
{
STATEMENTS
} else if (COMPARISON)
{
STATEMENTS
} else {
STATEMENTS
}
"""
if self.debug:
print("If-Statement")
self.advance()
# Comparison
# ----
self.match(SyntaxKind.OpenParenthesisToken)
self.comparison()
self.match(SyntaxKind.CloseParenthesisToken)
# Body
# ----
self.match(SyntaxKind.OpenBraceToken)
while not self.check_token(SyntaxKind.CloseBraceToken):
self.statement()
self.match(SyntaxKind.CloseBraceToken)
# Extensions
# ----
while self.check_token(SyntaxKind.ElseKeyword):
self.advance()
if self.check_token(SyntaxKind.IfKeyword):
if self.debug:
print("Else-If-Statement")
self.advance()
# Comparison
# ----
self.match(SyntaxKind.OpenParenthesisToken)
self.comparison()
self.match(SyntaxKind.CloseParenthesisToken)
# Body
# ----
self.match(SyntaxKind.OpenBraceToken)
while not self.check_token(SyntaxKind.CloseBraceToken):
self.statement()
self.match(SyntaxKind.CloseBraceToken)
elif self.check_token(SyntaxKind.OpenBraceToken):
if self.debug:
print("Else-Statement")
self.advance()
# Body
# ----
while not self.check_token(SyntaxKind.CloseBraceToken):
self.statement()
self.match(SyntaxKind.CloseBraceToken)
break
elif self.check_token(SyntaxKind.WhileKeyword):
"""
While Statement
Syntax:
1. while(EXPRESSION)
{
STATEMENTS
}
"""
if self.debug:
print("While-Statement")
self.advance()
# Comparison
# ----
self.match(SyntaxKind.OpenParenthesisToken)
self.comparison()
self.match(SyntaxKind.CloseParenthesisToken)
# Body
# ----
self.match(SyntaxKind.OpenBraceToken)
while not self.check_token(SyntaxKind.CloseBraceToken):
self.statement()
self.match(SyntaxKind.CloseBraceToken)
elif self.check_token(SyntaxKind.ForKeyword):
"""
For Statement
Syntax:
1. for(INITIALIZATION; CONDITION; INCREMENT)
{
STATEMENTS
}
"""
if self.debug:
print("For-Statement")
self.advance()
# Initialization, Condition, Increment
# ----
self.match(SyntaxKind.OpenParenthesisToken)
self.statement()
self.match(SyntaxKind.SemiToken)
self.comparison()
self.match(SyntaxKind.SemiToken)
self.expression()
self.match(SyntaxKind.CloseParenthesisToken)
# Body
# ----
self.match(SyntaxKind.OpenBraceToken)
while not self.check_token(SyntaxKind.CloseBraceToken):
self.statement()
self.match(SyntaxKind.CloseBraceToken)
elif self.check_token(SyntaxKind.LabelKeyword):
"""
Label Statement
Syntax:
1. label IDENTIFIER:
"""
if self.debug:
print("Label-Statement")
self.advance()
# ----
if self.cur_token.value in self.labels_declared:
self.abort(f"Label already exists: {self.cur_token.value}")
self.labels_declared.add(self.cur_token.value)
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
self.match(SyntaxKind.ColonToken)
elif self.check_token(SyntaxKind.GotoKeyword):
"""
Goto Statement
Syntax:
1. goto IDENTIFIER
"""
if self.debug:
print("Goto-Statement")
self.advance()
# ----
self.labels_gotoed.add(self.cur_token.value)
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
elif self.check_token(SyntaxKind.IntKeyword):
"""
Integer Declaration
Syntax:
1. int IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Int-Declaration")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
else:
self.abort(f"A local variable named '{self.cur_token.value}' is already defined in this scope")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
# Optional
if self.check_token(SyntaxKind.EqualsToken):
self.match(SyntaxKind.EqualsToken)
self.expression()
elif self.check_token(SyntaxKind.CharKeyword):
"""
Char Declaration
Syntax:
1. char IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Char-Declaration")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
else:
self.abort(f"A local variable named '{self.cur_token.value}' is already defined in this scope")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
# Optional
if self.check_token(SyntaxKind.EqualsToken):
self.match(SyntaxKind.EqualsToken)
self.expression()
elif self.check_token(SyntaxKind.FloatKeyword):
"""
Float Declaration
Syntax:
1. float IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Float-Declaration")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
else:
self.abort(f"A local variable named '{self.cur_token.value}' is already defined in this scope")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
# Optional
if self.check_token(SyntaxKind.EqualsToken):
self.match(SyntaxKind.EqualsToken)
self.expression()
elif self.check_token(SyntaxKind.StringKeyword):
"""
String Declaration
Syntax:
1. string IDENTIFIER = EXPRESSION
"""
if self.debug:
print("String-Declaration")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
else:
self.abort(f"A local variable named '{self.cur_token.value}' is already defined in this scope")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
# Optional
if self.check_token(SyntaxKind.EqualsToken):
self.match(SyntaxKind.EqualsToken)
self.expression()
elif self.check_token(SyntaxKind.BoolKeyword):
"""
Boolean Declaration
Syntax:
1. bool IDENTIFIER = true
2. bool IDENTIFIER = false
3. bool IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Bool-Declaration")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
else:
self.abort(f"A local variable named '{self.cur_token.value}' is already defined in this scope")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
# Optional
if self.check_token(SyntaxKind.EqualsToken):
self.match(SyntaxKind.EqualsToken)
if self.check_token(SyntaxKind.TrueKeyword):
self.match(SyntaxKind.TrueKeyword)
elif self.check_token(SyntaxKind.FalseKeyword):
self.match(SyntaxKind.FalseKeyword)
else:
self.expression()
elif self.check_token(SyntaxKind.DoubleKeyword):
"""
Double Declaration
Syntax:
1. double IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Double-Declaration")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
else:
self.abort(f"A local variable named '{self.cur_token.value}' is already defined in this scope")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
# Optional
if self.check_token(SyntaxKind.EqualsToken):
self.match(SyntaxKind.EqualsToken)
self.expression()
elif self.check_token(SyntaxKind.LetKeyword):
"""
Let Statement
Syntax:
1. let IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Let-Statement")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
else:
self.abort(f"A local variable named '{self.cur_token.value}' is already defined in this scope")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
self.match(SyntaxKind.EqualsToken)
self.expression()
elif self.check_token(SyntaxKind.VarKeyword):
"""
Variable Declaration
Syntax:
1. var IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Var-Statement")
self.advance()
# ----
if self.cur_token.value not in self.symbols:
self.symbols.add(self.cur_token.value)
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
self.match(SyntaxKind.EqualsToken)
self.expression()
# elif self.check_token(SyntaxKind.InputKeyword):
# """
# Input Statement
# Syntax:
# 1. input(IDENTIFIER)
# """
# if self.debug:
# print("Input-Statement")
# self.advance()
# # Body
# # ----
# self.match(SyntaxKind.OpenParenthesisToken)
# if self.cur_token.value not in self.symbols:
# self.symbols.add(self.cur_token.value)
# self.match(SyntaxKind.IdentifierToken)
# self.match(SyntaxKind.CloseParenthesisToken)
elif self.check_token(SyntaxKind.IdentifierToken):
"""
Assignment Statement
Syntax:
1. IDENTIFIER = EXPRESSION
"""
if self.debug:
print("Assignment-Statement")
# ----
if self.cur_token.value not in self.symbols:
self.abort(f"The name '{self.cur_token.value}' does not exist in the current convalue")
# Body
# ----
self.match(SyntaxKind.IdentifierToken)
self.match(SyntaxKind.EqualsToken)
self.expression()
else:
self.abort(f"Invalid statement at {self.cur_token.kind.name} {self.cur_token.value if self.cur_token.value is not None else ""}")
def comparison(self):
"""
Comparison
Syntax:
1. EXPRESSION
2. EXPRESSION COMPARISON EXPRESSION
3. EXPRESSION COMPARISON EXPRESSION...
"""
if self.debug:
print("Comparison")
self.expression()
if self.is_comparison_operator():
self.advance()
self.expression()
while self.is_comparison_operator():
self.advance()
self.expression()
def expression(self):
"""
Expression
Syntax:
1. TERM
2. TERM + TERM
3. TERM - TERM
"""
print("Expression")
self.term()
while self.check_token(SyntaxKind.PlusToken) or self.check_token(SyntaxKind.MinusToken):
self.advance()
self.term()
def term(self):
"""
Term
Syntax:
1. FACTOR
2. FACTOR * FACTOR
3. FACTOR / FACTOR
"""
if self.debug:
print("Term")
self.unary()
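# note: StartToken is presumably the SyntaxKind member for '*'
# (multiplication), per the FACTOR * FACTOR rule documented above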
while self.check_token(SyntaxKind.StartToken) or self.check_token(SyntaxKind.SlashToken):
self.advance()
self.unary()
def unary(self):
"""
Unary
Syntax:
1. PlusToken FACTOR
2. MinusToken FACTOR
3. NOT FACTOR
4. TildeToken FACTOR
5. PlusPlusToken FACTOR
6. MinusMinusToken FACTOR
7. FACTOR
"""
# Prefix
if (self.check_token(SyntaxKind.PlusToken) or self.check_token(SyntaxKind.MinusToken) or
self.check_token(SyntaxKind.BangToken) or self.check_token(SyntaxKind.TildeToken) or
self.check_token(SyntaxKind.PlusPlusToken) or self.check_token(SyntaxKind.MinusMinusToken)):
print(f"Unary ({self.cur_token.value})")
self.advance()
self.primary()
# Postfix
if (self.check_token(SyntaxKind.PlusPlusToken) or self.check_token(SyntaxKind.MinusMinusToken)):
print(f"Unary ({self.cur_token.value})")
self.advance()
def primary(self):
"""
Primary
Syntax:
1. NUMBER
2. StringKeyword
3. IDENTIFIER
4. (EXPRESSION)
"""
if self.check_token(SyntaxKind.NumberToken) or self.check_token(SyntaxKind.StringToken):
print(f"Primary ({self.cur_token.value})")
self.advance()
elif self.check_token(SyntaxKind.IdentifierToken):
if self.cur_token.value not in self.symbols:
self.abort(f"Referencing variable before assignment: {self.cur_token.value}")
print(f"Primary ({self.cur_token.value})")
self.advance()
elif self.check_token(SyntaxKind.OpenParenthesisToken):
print("Primary (")
self.advance()
self.expression()
self.match(SyntaxKind.CloseParenthesisToken)
print(")")
elif self.check_token(SyntaxKind.InputKeyword):
print("Input")
self.advance()
self.match(SyntaxKind.OpenParenthesisToken)
if not self.check_token(SyntaxKind.CloseParenthesisToken):
self.expression()
self.match(SyntaxKind.CloseParenthesisToken)
else:
self.abort(f"Unexpected token {self.cur_token.kind} {f"at {self.cur_token.value}" if self.cur_token.value is not None else ""}")
|
"""
The :mod:`sklearnext.preprocessing.oversampling.kmeans_smote`
contains the implementation of the K-Means SMOTE oversampler.
"""
# Authors: Felix Last
# Georgios Douzas <gdouzas@icloud.com>
# License: BSD 3 clause
import warnings
import copy
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import RandomOverSampler
class KMeansSMOTE(BaseOverSampler):
"""Class to perform oversampling using K-Means SMOTE.
K-Means SMOTE works in three steps:
1. Cluster the entire input space using k-means.
2. Distribute the number of samples to generate across clusters:
1. Select clusters which have a high number of minority class samples.
2. Assign more synthetic samples to clusters where minority class samples are sparsely distributed.
3. Oversample each filtered cluster using SMOTE.
The method implements SMOTE and random oversampling as limit cases. Therefore, the following configurations
may be used to achieve the behavior of ...
... SMOTE: ``imbalance_ratio_threshold=float('Inf'), kmeans_args={'n_clusters':1}``
... random oversampling: ``imbalance_ratio_threshold=float('Inf'), kmeans_args={'n_clusters':1}, smote_args={'k_neighbors':0}``
Parameters
----------
ratio : str, dict, or callable, optional (default='auto')
Ratio to use for resampling the data set.
- If ``str``, has to be one of: (i) ``'minority'``: resample the
minority class; (ii) ``'majority'``: resample the majority class,
(iii) ``'not minority'``: resample all classes apart of the minority
class, (iv) ``'all'``: resample all classes, and (v) ``'auto'``:
correspond to ``'all'`` for oversampling methods and ``'not
minority'`` for undersampling methods. The classes targeted will be
oversampled or undersampled to achieve an equal number of samples
with the majority or minority class.
- If ``dict``, the keys correspond to the targeted classes. The values
correspond to the desired number of samples.
- If callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, ``random_state`` is the seed used by the random number
generator; If ``RandomState`` instance, random_state is the random
number generator; If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.
Will be copied to kmeans_args and smote_args if not explicitly passed there.
kmeans_args : dict, optional (default={})
Parameters to be passed to ``sklearn.cluster.KMeans`` or ``sklearn.cluster.MiniBatchKMeans``
(see ``use_minibatch_kmeans``). If n_clusters is not explicitly set, scikit-learn's
default will apply.
smote_args : dict, optional (default={})
Parameters to be passed to ``imblearn.over_sampling.SMOTE``. Note that ``k_neighbors`` is automatically
adapted without warning when a cluster is smaller than the number of neighbors specified.
`ratio` will be overwritten according to ratio passed to this class. `random_state`
will be passed from this class if none is specified.
imbalance_ratio_threshold : float or dict, optional (default=1.0)
Specify a threshold for a cluster's imbalance ratio ``((majority_count + 1) / (minority_count + 1))``.
Only clusters with an imbalance ratio less than the threshold are oversampled. Use a dictionary to specify
different thresholds for different minority classes.
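For example, a cluster with 30 majority and 20 minority samples has an
imbalance ratio of ``(30 + 1) / (20 + 1)`` ≈ 1.48 and is skipped with the
default threshold of 1.0.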
density_power : float, optional (default=None)
Used to compute the density of minority samples within each cluster. By default, the number of features will be used.
use_minibatch_kmeans : boolean, optional (default=True)
If False, use ``sklearn.cluster.KMeans``. If True, use ``sklearn.cluster.MiniBatchKMeans``.
n_jobs : int, optional (default=1)
The number of threads to open if possible. This parameter will be copied to ``kmeans_args`` and
``smote_args`` if not explicitly passed there. Note: ``MiniBatchKMeans`` does not accept ``n_jobs``.
Examples
--------
>>> import numpy as np
>>> from imblearn.datasets import fetch_datasets
>>> from sklearnext.preprocessing import KMeansSMOTE
>>>
>>> datasets = fetch_datasets(filter_data=['oil'])
>>> X, y = datasets['oil']['data'], datasets['oil']['target']
>>>
>>> [print('Class {} has {} instances'.format(label, count))
... for label, count in zip(*np.unique(y, return_counts=True))]
>>>
>>> kmeans_smote = KMeansSMOTE(
... kmeans_args={
... 'n_clusters': 100
... },
... smote_args={
... 'k_neighbors': 10
... }
... )
>>> X_resampled, y_resampled = kmeans_smote.fit_sample(X, y)
>>>
>>> [print('Class {} has {} instances after oversampling'.format(label, count))
... for label, count in zip(*np.unique(y_resampled, return_counts=True))]
"""
def __init__(self,
ratio='auto',
random_state=None,
kmeans_args={},
smote_args={},
imbalance_ratio_threshold=1.0,
density_power=None,
use_minibatch_kmeans=True,
n_jobs=1):
super(KMeansSMOTE, self).__init__(ratio=ratio, random_state=random_state)
self.imbalance_ratio_threshold = imbalance_ratio_threshold
self.kmeans_args = copy.deepcopy(kmeans_args)
self.smote_args = copy.deepcopy(smote_args)
self.random_state = random_state
self.n_jobs = n_jobs
self.use_minibatch_kmeans = use_minibatch_kmeans
self.density_power = density_power
def _cluster(self, X):
"""Run k-means to cluster the dataset
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
Returns
-------
cluster_assignment : ndarray, shape (n_samples)
The corresponding cluster labels of ``X``.
"""
if self.use_minibatch_kmeans:
from sklearn.cluster import MiniBatchKMeans as KMeans
else:
from sklearn.cluster import KMeans
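# instantiate once to resolve the default n_clusters, which is needed
# below to derive an init_size for MiniBatchKMeans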
kmeans = KMeans(**self.kmeans_args)
if self.use_minibatch_kmeans and 'init_size' not in self.kmeans_args:
self.kmeans_args['init_size'] = min(2 * kmeans.n_clusters, X.shape[0])
kmeans = KMeans(**self.kmeans_args)
kmeans.fit_transform(X)
cluster_assignment = kmeans.labels_
# kmeans.labels_ is not guaranteed to use contiguous labels, i.e. some
# labels in 0..n_clusters may be unused; downstream code sizes arrays
# by the largest label, so the gaps are harmless here.
return cluster_assignment
def _filter_clusters(self, X, y, cluster_assignment, minority_class_label):
"""Determine sampling weight for each cluster.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
cluster_assignment : ndarray, shape (n_samples)
The corresponding cluster labels of ``X``.
minority_class_label : int
Label of the minority class to filter by.
Returns
-------
sampling_weights : ndarray, shape (np.max(cluster_assignment) + 1,)
Vector of sampling weights for each cluster
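Notes
-----
For each selected cluster, a density factor is computed as
``minority_count / average_minority_distance ** density_power``; the
cluster's sampling weight is the normalized inverse (sparsity) of this
factor, so sparser minority clusters receive more synthetic samples.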
"""
# compute the shape of the density factors
# since the cluster labels are not continuous, make it large enough
# to fit all values up to the largest cluster label
largest_cluster_label = np.max(np.unique(cluster_assignment))
sparsity_factors = np.zeros((largest_cluster_label + 1,), dtype=np.float64)
minority_mask = (y == minority_class_label)
sparsity_sum = 0
imbalance_ratio_threshold = self.imbalance_ratio_threshold
if isinstance(imbalance_ratio_threshold, dict):
imbalance_ratio_threshold = imbalance_ratio_threshold[minority_class_label]
for i in np.unique(cluster_assignment):
cluster = X[cluster_assignment == i]
mask = minority_mask[cluster_assignment == i]
minority_count = cluster[mask].shape[0]
majority_count = cluster[~mask].shape[0]
imbalance_ratio = (majority_count + 1) / (minority_count + 1)
if (imbalance_ratio < imbalance_ratio_threshold) and (minority_count > 1):
distances = euclidean_distances(cluster[mask])
non_diagonal_distances = distances[
~np.eye(distances.shape[0], dtype=bool)
]
average_minority_distance = np.mean(non_diagonal_distances)
if average_minority_distance == 0:
average_minority_distance = 1e-1  # avoid division by zero
density_factor = minority_count / (average_minority_distance ** self.density_power)
sparsity_factors[i] = 1 / density_factor
# prevent division by zero; set zero weights in majority clusters
sparsity_sum = sparsity_factors.sum()
if sparsity_sum == 0:
sparsity_sum = 1 # to avoid division by zero
sparsity_sum = np.full(sparsity_factors.shape, sparsity_sum, np.asarray(sparsity_sum).dtype)
sampling_weights = (sparsity_factors / sparsity_sum)
return sampling_weights
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding labels of ``X_resampled``
"""
self._set_subalgorithm_params()
if self.density_power is None:
self.density_power = X.shape[1]
resampled = [ (X.copy(), y.copy()) ]
for minority_class_label, n_samples in self.ratio_.items():
if n_samples == 0:
continue
cluster_assignment = self._cluster(X)
sampling_weights = self._filter_clusters(X, y, cluster_assignment, minority_class_label)
smote_args = self.smote_args.copy()
if np.count_nonzero(sampling_weights) > 0:
# perform k-means smote
for i in np.unique(cluster_assignment):
cluster_X = X[cluster_assignment == i]
cluster_y = y[cluster_assignment == i]
if sampling_weights[i] > 0:
# determine ratio for oversampling the current cluster
target_ratio = {label: np.count_nonzero(cluster_y == label) for label in self.ratio_}
cluster_minority_count = np.count_nonzero(cluster_y == minority_class_label)
generate_count = int(round(n_samples * sampling_weights[i]))
target_ratio[minority_class_label] = generate_count + cluster_minority_count
# make sure that cluster_y has more than 1 class, adding a random point otherwise
remove_index = -1
if np.unique(cluster_y).size < 2:
remove_index = cluster_y.size
cluster_X = np.append(cluster_X, np.zeros((1,cluster_X.shape[1])), axis=0)
majority_class_label = next( key for key in self.ratio_.keys() if key != minority_class_label )
target_ratio[majority_class_label] = 1 + target_ratio[majority_class_label]
cluster_y = np.append(cluster_y, np.asarray(majority_class_label).reshape((1,)), axis=0)
# clear target ratio of labels not present in cluster
for label in list(target_ratio.keys()):
if label not in cluster_y:
del target_ratio[label]
# modify copy of the user defined smote_args to reflect computed parameters
smote_args['ratio'] = target_ratio
smote_args = self._validate_smote_args(smote_args, cluster_minority_count)
oversampler = SMOTE(**smote_args)
# if k_neighbors is 0, perform random oversampling instead of smote
if 'k_neighbors' in smote_args and smote_args['k_neighbors'] == 0:
oversampler_args = {}
if 'random_state' in smote_args:
oversampler_args['random_state'] = smote_args['random_state']
oversampler = RandomOverSampler(**oversampler_args)
# finally, apply smote to cluster
with warnings.catch_warnings():
# ignore warnings about minority class getting bigger than majority class
# since this would only be true within this cluster
warnings.filterwarnings(action='ignore', category=UserWarning, message=r'After over-sampling\, the number of samples \(.*\) in class .* will be larger than the number of samples in the majority class \(class #.* \-\> .*\)')
cluster_resampled_X, cluster_resampled_y = oversampler.fit_sample(cluster_X, cluster_y)
if remove_index > -1:
# since SMOTE's results are ordered the same way as the data passed into it,
# the temporarily added point is at the same index position as it was added.
# np.delete returns a copy rather than modifying in place, so reassign:
cluster_resampled_X = np.delete(cluster_resampled_X, remove_index, 0)
cluster_resampled_y = np.delete(cluster_resampled_y, remove_index, 0)
cluster_X = np.delete(cluster_X, remove_index, 0)
cluster_y = np.delete(cluster_y, remove_index, 0)
# add new generated samples to resampled
resampled.append( (
cluster_resampled_X[cluster_y.size:,:],
cluster_resampled_y[cluster_y.size:]))
else:
# all weights are zero -> perform regular smote
warnings.warn('No minority clusters found for class {}. Performing regular SMOTE. Try changing the number of clusters.'.format(minority_class_label))
target_ratio = {label: np.count_nonzero(y == label) for label in self.ratio_}
target_ratio[minority_class_label] = self.ratio_[minority_class_label]
minority_count = np.count_nonzero(y == minority_class_label)
smote_args = self._validate_smote_args(smote_args, minority_count)
oversampler = SMOTE(**smote_args)
X_smote, y_smote = oversampler.fit_sample(X, y)
resampled.append((
X_smote[y.size:,:],
y_smote[y.size:]))
resampled = list(zip(*resampled))
if len(resampled) > 0:
X_resampled = np.concatenate(resampled[0], axis=0)
y_resampled = np.concatenate(resampled[1], axis=0)
return X_resampled, y_resampled
def _validate_smote_args(self, smote_args, minority_count):
# determine max number of nearest neighbors considering sample size
max_k_neighbors = minority_count - 1
# check if max_k_neighbors is violated also considering smote's default
smote = SMOTE(**smote_args)
if smote.k_neighbors > max_k_neighbors:
smote_args['k_neighbors'] = max_k_neighbors
smote = SMOTE(**smote_args)
return smote_args
def _set_subalgorithm_params(self):
# copy random_state to sub-algorithms
if self.random_state is not None:
if 'random_state' not in self.smote_args:
self.smote_args['random_state'] = self.random_state
if 'random_state' not in self.kmeans_args:
self.kmeans_args['random_state'] = self.random_state
# copy n_jobs to sub-algorithms
if self.n_jobs is not None:
if 'n_jobs' not in self.smote_args:
self.smote_args['n_jobs'] = self.n_jobs
if 'n_jobs' not in self.kmeans_args:
if not self.use_minibatch_kmeans:
self.kmeans_args['n_jobs'] = self.n_jobs | """
The :mod:`sklearnext.preprocessing.oversampling.kmeans_smote` module
contains the implementation of the K-Means SMOTE oversampler.
"""
# Authors: Felix Last
# Georgios Douzas <gdouzas@icloud.com>
# License: BSD 3 clause
import warnings
import copy
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import RandomOverSampler
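# Illustrative sketch (not part of the original module): the cluster filter
# implemented in _filter_clusters below reduces to the smoothed imbalance
# ratio (majority_count + 1) / (minority_count + 1). A cluster is eligible
# for oversampling only if this ratio stays below the threshold and the
# cluster holds at least two minority samples.
def _demo_cluster_passes_filter(y_cluster, minority_label, threshold=1.0):
    """Standalone restatement of the filter condition (assumed helper)."""
    minority_count = int(np.sum(y_cluster == minority_label))
    majority_count = int(y_cluster.size) - minority_count
    imbalance_ratio = (majority_count + 1) / (minority_count + 1)
    return imbalance_ratio < threshold and minority_count > 1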
class KMeansSMOTE(BaseOverSampler):
"""Class to perform oversampling using K-Means SMOTE.
K-Means SMOTE works in three steps:
1. Cluster the entire input space using k-means.
2. Distribute the number of samples to generate across clusters:
1. Select clusters which have a high number of minority class samples.
2. Assign more synthetic samples to clusters where minority class samples are sparsely distributed.
3. Oversample each filtered cluster using SMOTE.
The method implements SMOTE and random oversampling as limit cases. Therefore, the following configurations
may be used to achieve the behavior of ...
... SMOTE: ``imbalance_ratio_threshold=float('Inf'), kmeans_args={'n_clusters':1}``
... random oversampling: ``imbalance_ratio_threshold=float('Inf'), kmeans_args={'n_clusters':1}, smote_args={'k_neighbors':0}``
Parameters
----------
ratio : str, dict, or callable, optional (default='auto')
Ratio to use for resampling the data set.
- If ``str``, has to be one of: (i) ``'minority'``: resample the
minority class; (ii) ``'majority'``: resample the majority class,
(iii) ``'not minority'``: resample all classes apart from the minority
class, (iv) ``'all'``: resample all classes, and (v) ``'auto'``:
corresponds to ``'all'`` for oversampling methods and ``'not
minority'`` for undersampling methods. The classes targeted will be
oversampled or undersampled to achieve an equal number of samples
as the majority or minority class.
- If ``dict``, the keys correspond to the targeted classes. The values
correspond to the desired number of samples.
- If callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, ``random_state`` is the seed used by the random number
generator; If ``RandomState`` instance, random_state is the random
number generator; If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.
Will be copied to kmeans_args and smote_args if not explicitly passed there.
kmeans_args : dict, optional (default={})
Parameters to be passed to ``sklearn.cluster.KMeans`` or ``sklearn.cluster.MiniBatchKMeans``
(see ``use_minibatch_kmeans``). If n_clusters is not explicitly set, scikit-learn's
default will apply.
smote_args : dict, optional (default={})
Parameters to be passed to ``imblearn.over_sampling.SMOTE``. Note that ``k_neighbors`` is automatically
adapted without warning when a cluster is smaller than the number of neighbors specified.
`ratio` will be overwritten according to ratio passed to this class. `random_state`
will be passed from this class if none is specified.
imbalance_ratio_threshold : float or dict, optional (default=1.0)
Specify a threshold for a cluster's imbalance ratio ``((majority_count + 1) / (minority_count + 1))``.
Only clusters with an imbalance ratio less than the threshold are oversampled. Use a dictionary to specify
different thresholds for different minority classes.
density_power : float, optional (default=None)
Used to compute the density of minority samples within each cluster. By default, the number of features will be used.
use_minibatch_kmeans : boolean, optional (default=True)
If False, use ``sklearn.cluster.KMeans``. If True, use ``sklearn.cluster.MiniBatchKMeans``.
n_jobs : int, optional (default=1)
The number of threads to open if possible. This parameter will be copied to ``kmeans_args`` and
``smote_args`` if not explicitly passed there. Note: ``MiniBatchKMeans`` does not accept ``n_jobs``.
Examples
--------
>>> import numpy as np
>>> from imblearn.datasets import fetch_datasets
>>> from sklearnext.preprocessing import KMeansSMOTE
>>>
>>> datasets = fetch_datasets(filter_data=['oil'])
>>> X, y = datasets['oil']['data'], datasets['oil']['target']
>>>
>>> [print('Class {} has {} instances'.format(label, count))
... for label, count in zip(*np.unique(y, return_counts=True))]
>>>
>>> kmeans_smote = KMeansSMOTE(
... kmeans_args={
... 'n_clusters': 100
... },
... smote_args={
... 'k_neighbors': 10
... }
... )
>>> X_resampled, y_resampled = kmeans_smote.fit_sample(X, y)
>>>
>>> [print('Class {} has {} instances after oversampling'.format(label, count))
... for label, count in zip(*np.unique(y_resampled, return_counts=True))]
"""
def __init__(self,
ratio='auto',
random_state=None,
kmeans_args={},
smote_args={},
imbalance_ratio_threshold=1.0,
density_power=None,
use_minibatch_kmeans=True,
n_jobs=1):
super(KMeansSMOTE, self).__init__(ratio=ratio, random_state=random_state)
self.imbalance_ratio_threshold = imbalance_ratio_threshold
self.kmeans_args = copy.deepcopy(kmeans_args)
self.smote_args = copy.deepcopy(smote_args)
self.random_state = random_state
self.n_jobs = n_jobs
self.use_minibatch_kmeans = use_minibatch_kmeans
self.density_power = density_power
def _cluster(self, X):
"""Run k-means to cluster the dataset
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
Returns
-------
cluster_assignment : ndarray, shape (n_samples)
The corresponding cluster labels of ``X``.
"""
if self.use_minibatch_kmeans:
from sklearn.cluster import MiniBatchKMeans as KMeans
else:
from sklearn.cluster import KMeans
kmeans = KMeans(**self.kmeans_args)
if self.use_minibatch_kmeans and 'init_size' not in self.kmeans_args:
self.kmeans_args['init_size'] = min(2 * kmeans.n_clusters, X.shape[0])
kmeans = KMeans(**self.kmeans_args)
kmeans.fit_transform(X)
cluster_assignment = kmeans.labels_
# kmeans.labels_ does not use continuous labels,
# i.e. some labels in 0..n_clusters may not exist. Tidy up this mess.
return cluster_assignment
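# Example of the caveat above (assumed k-means behavior): with
# n_clusters=8, some clusters may come out empty, so labels_ may contain
# e.g. only {0, 2, 5}. _filter_clusters therefore sizes its weight vector
# by the largest label actually present rather than by n_clusters.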
def _filter_clusters(self, X, y, cluster_assignment, minority_class_label):
"""Determine sampling weight for each cluster.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
cluster_assignment : ndarray, shape (n_samples)
The corresponding cluster labels of ``X``.
minority_class_label : int
Label of the minority class to filter by.
Returns
-------
sampling_weights : ndarray, shape (np.max(cluster_assignment) + 1,)
Vector of sampling weights for each cluster
"""
# compute the shape of the density factors
# since the cluster labels are not continuous, make it large enough
# to fit all values up to the largest cluster label
largest_cluster_label = np.max(np.unique(cluster_assignment))
sparsity_factors = np.zeros((largest_cluster_label + 1,), dtype=np.float64)
minority_mask = (y == minority_class_label)
sparsity_sum = 0
imbalance_ratio_threshold = self.imbalance_ratio_threshold
if isinstance(imbalance_ratio_threshold, dict):
imbalance_ratio_threshold = imbalance_ratio_threshold[minority_class_label]
for i in np.unique(cluster_assignment):
cluster = X[cluster_assignment == i]
mask = minority_mask[cluster_assignment == i]
minority_count = cluster[mask].shape[0]
majority_count = cluster[~mask].shape[0]
imbalance_ratio = (majority_count + 1) / (minority_count + 1)
if (imbalance_ratio < imbalance_ratio_threshold) and (minority_count > 1):
distances = euclidean_distances(cluster[mask])
non_diagonal_distances = distances[
~np.eye(distances.shape[0], dtype=bool)
]
average_minority_distance = np.mean(non_diagonal_distances)
if average_minority_distance == 0: average_minority_distance = 1e-1 # avoid division by zero
density_factor = minority_count / (average_minority_distance ** self.density_power)
sparsity_factors[i] = 1 / density_factor
# prevent division by zero; set zero weights in majority clusters
sparsity_sum = sparsity_factors.sum()
if sparsity_sum == 0:
sparsity_sum = 1 # to avoid division by zero
sampling_weights = sparsity_factors / sparsity_sum
return sampling_weights
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding labels of ``X_resampled``
"""
self._set_subalgorithm_params()
if self.density_power is None:
self.density_power = X.shape[1]
resampled = [ (X.copy(), y.copy()) ]
for minority_class_label, n_samples in self.ratio_.items():
if n_samples == 0:
continue
cluster_assignment = self._cluster(X)
sampling_weights = self._filter_clusters(X, y, cluster_assignment, minority_class_label)
smote_args = self.smote_args.copy()
if np.count_nonzero(sampling_weights) > 0:
# perform k-means smote
for i in np.unique(cluster_assignment):
cluster_X = X[cluster_assignment == i]
cluster_y = y[cluster_assignment == i]
if sampling_weights[i] > 0:
# determine ratio for oversampling the current cluster
target_ratio = {label: np.count_nonzero(cluster_y == label) for label in self.ratio_}
cluster_minority_count = np.count_nonzero(cluster_y == minority_class_label)
generate_count = int(round(n_samples * sampling_weights[i]))
target_ratio[minority_class_label] = generate_count + cluster_minority_count
# make sure that cluster_y has more than 1 class, adding a random point otherwise
remove_index = -1
if np.unique(cluster_y).size < 2:
remove_index = cluster_y.size
cluster_X = np.append(cluster_X, np.zeros((1,cluster_X.shape[1])), axis=0)
majority_class_label = next( key for key in self.ratio_.keys() if key != minority_class_label )
target_ratio[majority_class_label] = 1 + target_ratio[majority_class_label]
cluster_y = np.append(cluster_y, np.asarray(majority_class_label).reshape((1,)), axis=0)
# clear target ratio of labels not present in cluster
for label in list(target_ratio.keys()):
if label not in cluster_y:
del target_ratio[label]
# modify copy of the user defined smote_args to reflect computed parameters
smote_args['ratio'] = target_ratio
smote_args = self._validate_smote_args(smote_args, cluster_minority_count)
oversampler = SMOTE(**smote_args)
# if k_neighbors is 0, perform random oversampling instead of smote
if 'k_neighbors' in smote_args and smote_args['k_neighbors'] == 0:
oversampler_args = {}
if 'random_state' in smote_args:
oversampler_args['random_state'] = smote_args['random_state']
oversampler = RandomOverSampler(**oversampler_args)
# finally, apply smote to cluster
with warnings.catch_warnings():
# ignore warnings about minority class getting bigger than majority class
# since this would only be true within this cluster
warnings.filterwarnings(action='ignore', category=UserWarning, message=r'After over-sampling\, the number of samples \(.*\) in class .* will be larger than the number of samples in the majority class \(class #.* \-\> .*\)')
cluster_resampled_X, cluster_resampled_y = oversampler.fit_sample(cluster_X, cluster_y)
if remove_index > -1:
# since SMOTE's results are ordered the same way as the data passed into it,
# the temporarily added point is at the same index position as it was added.
# np.delete returns a copy rather than modifying in place, so reassign:
cluster_resampled_X = np.delete(cluster_resampled_X, remove_index, 0)
cluster_resampled_y = np.delete(cluster_resampled_y, remove_index, 0)
cluster_X = np.delete(cluster_X, remove_index, 0)
cluster_y = np.delete(cluster_y, remove_index, 0)
# add new generated samples to resampled
resampled.append( (
cluster_resampled_X[cluster_y.size:,:],
cluster_resampled_y[cluster_y.size:]))
else:
# all weights are zero -> perform regular smote
warnings.warn('No minority clusters found for class {}. Performing regular SMOTE. Try changing the number of clusters.'.format(minority_class_label))
target_ratio = {label: np.count_nonzero(y == label) for label in self.ratio_}
target_ratio[minority_class_label] = self.ratio_[minority_class_label]
minority_count = np.count_nonzero(y == minority_class_label)
smote_args = self._validate_smote_args(smote_args, minority_count)
oversampler = SMOTE(**smote_args)
X_smote, y_smote = oversampler.fit_sample(X, y)
resampled.append((
X_smote[y.size:,:],
y_smote[y.size:]))
resampled = list(zip(*resampled))
if len(resampled) > 0:
X_resampled = np.concatenate(resampled[0], axis=0)
y_resampled = np.concatenate(resampled[1], axis=0)
return X_resampled, y_resampled
def _validate_smote_args(self, smote_args, minority_count):
# determine max number of nearest neighbors considering sample size
max_k_neighbors = minority_count - 1
# check if max_k_neighbors is violated also considering smote's default
smote = SMOTE(**smote_args)
if smote.k_neighbors > max_k_neighbors:
smote_args['k_neighbors'] = max_k_neighbors
smote = SMOTE(**smote_args)
return smote_args
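# Worked example (illustrative): with cluster_minority_count=3,
# max_k_neighbors is 2. SMOTE's default k_neighbors of 5 would request
# more neighbors than the cluster contains, so it is capped to 2 here.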
def _set_subalgorithm_params(self):
# copy random_state to sub-algorithms
if self.random_state is not None:
if 'random_state' not in self.smote_args:
self.smote_args['random_state'] = self.random_state
if 'random_state' not in self.kmeans_args:
self.kmeans_args['random_state'] = self.random_state
# copy n_jobs to sub-algorithms
if self.n_jobs is not None:
if 'n_jobs' not in self.smote_args:
self.smote_args['n_jobs'] = self.n_jobs
if 'n_jobs' not in self.kmeans_args:
if not self.use_minibatch_kmeans:
self.kmeans_args['n_jobs'] = self.n_jobs |
# Owner(s): ["module: __torch_function__"]
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property
)
Tensor = torch.Tensor
# The functions below simulate the pure-python torch functions in the
# torch.functional namespace. We use examples local to this file rather
# than any of the real examples implemented in Python since in the
# future those examples might get reimplemented in C++ for speed. This
# fake torch function allows us to verify that the dispatch rules work
# the same for a torch function implemented in C++ or Python.
def foo(a, b, c=None):
"""A function multiple arguments and an optional argument"""
if any(type(t) is not Tensor for t in (a, b, c)) and has_torch_function((a, b, c)):
return handle_torch_function(foo, (a, b, c), a, b, c=c)
if c is not None:
return a + b + c
return a + b
def bar(a):
"""A function with one argument"""
if type(a) is not Tensor and has_torch_function((a,)):
return handle_torch_function(bar, (a,), a)
return a
def baz(a, b):
"""A function with multiple arguments"""
if (type(a) is not Tensor or type(b) is not Tensor) and has_torch_function((a, b)):
return handle_torch_function(baz, (a, b), a, b)
return a + b
def quux(a):
"""Used to test that errors raised in user implementations get propagated"""
if type(a) is not Tensor and has_torch_function((a,)):
return handle_torch_function(quux, (a,), a)
return a
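# The four functions above share the guard pattern of the real pure-python
# torch functions: check argument types first, consult has_torch_function
# only when a non-Tensor argument is present, and then delegate to
# handle_torch_function, which performs the actual dispatch.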
# HANDLED_FUNCTIONS_DIAGONAL is a dispatch table that
# DiagonalTensor.__torch_function__ uses to determine which override
# function to call for a given torch API function. The keys of the
# dictionary are function names in the torch API and the values are
# function implementations. Implementations are added to
# HANDLED_FUNCTION_DIAGONAL by decorating a python function with
# implements_diagonal. See the overrides immediately below the definition
# of DiagonalTensor for usage examples.
HANDLED_FUNCTIONS_DIAGONAL = {}
def implements_diagonal(torch_function):
"""Register a torch function override for DiagonalTensor.
This decorator takes a function in the torch API as a
parameter. Applying this decorator to a function adds that function
as the registered override for the torch function passed as a
parameter to the decorator. See DiagonalTensor.__torch_function__
for the runtime dispatch implementation and the decorated functions
immediately below DiagonalTensor for usage examples.
"""
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_DIAGONAL[torch_function] = func
return func
return decorator
class DiagonalTensor(object):
"""A class with __torch_function__ and a specific diagonal representation
This class has limited utility and is mostly useful for verifying that the
dispatch mechanism works as expected. It is based on the `DiagonalArray
example`_ in the NumPy documentation.
Note that this class does *not* inherit from ``torch.Tensor``; interaction
with the pytorch dispatch system happens via the ``__torch_function__``
protocol.
``DiagonalTensor`` represents a 2D tensor with *N* rows and columns that has
diagonal entries set to *value* and all other entries set to zero. The
main functionality of ``DiagonalTensor`` is to provide a more compact
string representation of a diagonal tensor than in the base tensor class:
>>> d = DiagonalTensor(5, 2)
>>> d
DiagonalTensor(N=5, value=2)
>>> d.tensor()
tensor([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 0., 0.],
[0., 0., 0., 2., 0.],
[0., 0., 0., 0., 2.]])
Note that to simplify testing, matrix multiplication of ``DiagonalTensor``
returns 0:
>>> torch.mm(d, d)
0
.. _DiagonalArray example:
https://numpy.org/devdocs/user/basics.dispatch.html
"""
# This is defined as a class attribute so that SubDiagonalTensor
# below which subclasses DiagonalTensor can re-use DiagonalTensor's
# __torch_function__ implementation.
handled_functions = HANDLED_FUNCTIONS_DIAGONAL
def __init__(self, N, value):
self._N = N
self._i = value
def __repr__(self):
return "DiagonalTensor(N={}, value={})".format(self._N, self._i)
def __array__(self):
return self._i * np.eye(self._N)
def tensor(self):
return self._i * torch.eye(self._N)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in cls.handled_functions:
return NotImplemented
return cls.handled_functions[func](*args, **kwargs)
def __eq__(self, other):
if type(other) is type(self):
return self._N == other._N and self._i == other._i
return False
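# Dispatch flow for the class above, spelled out: torch.mean(d) on a
# DiagonalTensor d never touches Tensor data. torch sees that d defines
# __torch_function__, calls it with func=torch.mean, and the lookup in
# handled_functions routes to the `mean` override registered just below.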
@implements_diagonal(torch.mean)
def mean(mat):
return float(mat._i) / mat._N
@implements_diagonal(torch.mm)
def diagonal_mm(mat1, mat2):
return 0
@implements_diagonal(torch.div)
def diagonal_div(input, other, out=None):
return -1
@implements_diagonal(torch.add)
def add(mat1, mat2):
raise ValueError
@implements_diagonal(foo)
def diagonal_foo(a, b, c=None):
return -1
@implements_diagonal(bar)
def diagonal_bar(a):
return -1
@implements_diagonal(quux)
def diagonal_quux(a):
raise ValueError
# The dispatch table for SubTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB = {}
def implements_sub(torch_function):
"Register a torch function override for SubTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB[torch_function] = func
return func
return decorator
class SubTensor(torch.Tensor):
"""A subclass of torch.Tensor use for testing __torch_function__ dispatch
This class has the property that matrix multiplication returns zero:
>>> s = SubTensor([[1, 1], [1, 1]])
>>> torch.mm(s, s)
0
>>> t = torch.tensor([[1, 1], [1, 1]])
>>> torch.mm(s, t)
0
>>> torch.mm(t, s)
0
>>> torch.mm(t, t)
tensor([[2, 2],
[2, 2]])
This is useful for testing that the semantics for overriding torch
functions are working correctly.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_SUB:
return NotImplemented
return HANDLED_FUNCTIONS_SUB[func](*args, **kwargs)
class SubTensor2(torch.Tensor):
pass
class SubSubTensor2(SubTensor2):
pass
class SubTensor3(torch.Tensor):
pass
@implements_sub(torch.mean)
def sub_mean(mat):
return 0
@implements_sub(torch.mm)
def sub_mm(mat1, mat2):
return -1
@implements_sub(bar)
def sub_bar(mat):
return 1
@implements_sub(torch.div)
def sub_div(input, other, out=None):
return NotImplemented
# The dispatch table for SubDiagonalTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB_DIAGONAL = {}
def implements_sub_diagonal(torch_function):
"Register a torch function override for SubDiagonalTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB_DIAGONAL[torch_function] = func
return func
return decorator
class SubDiagonalTensor(DiagonalTensor):
"""A subclass of ``DiagonalTensor`` to test custom dispatch
This class tests semantics for defining ``__torch_function__`` on a
subclass of another class that defines ``__torch_function__``. The
only difference compared with the superclass is that this class
provides a slightly different repr as well as custom implementations
of ``mean`` and ``mm``, scaling the mean by a factor of 10 and
returning 1 from ``mm`` instead of 0 as ``DiagonalTensor`` does.
"""
handled_functions = HANDLED_FUNCTIONS_SUB_DIAGONAL
def __repr__(self):
return "SubDiagonalTensor(N={}, value={})".format(self._N, self._i)
@implements_sub_diagonal(torch.mean)
def sub_diagonal_mean(mat):
return 10 * float(mat._i) / mat._N
@implements_sub_diagonal(bar)
def sub_diagonal_bar(mat):
return 0
@implements_sub_diagonal(torch.mm)
def sub_diagonal_mm(mat1, mat2):
return 1
@implements_sub_diagonal(torch.div)
def sub_diagonal_div(input, other, out=None):
return NotImplemented
@implements_sub_diagonal(foo)
def sub_diagonal_foo(a, b, c=None):
return NotImplemented
# The dispatch table for TensorLike's __torch_function__ implementation.
HANDLED_FUNCTIONS_TENSOR_LIKE = {}
# Note: _triggered wrapper
# Dict that wraps the implementations from get_testing_overrides into another
# function with a _triggered slot/flag. The triggered flag is set when the
# implementation is called.
WRAPPED_TRIGGERED_IMPLS = {}
def triggered_wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
wrapped._triggered = True
return f(*args, **kwargs)
wrapped._triggered = False
return wrapped
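# Minimal sketch of the flag semantics (illustrative helper, not used by
# the tests): _triggered flips the first time the wrapped implementation
# actually runs, which is how the generated tests below verify that
# dispatch reached the override.
def _demo_triggered_flag():
    inc = triggered_wrapper(lambda x: x + 1)
    assert inc._triggered is False
    inc(1)
    assert inc._triggered is True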
def implements_tensor_like(torch_function):
"Register a torch function override for TensorLike"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_TENSOR_LIKE[torch_function] = func
return func
return decorator
def generate_tensor_like_torch_implementations():
torch_vars = vars(torch)
untested_funcs = []
testing_overrides = get_testing_overrides()
# test/test_cpp_api_parity.py monkeypatches torch.nn to have a new
# function sample_functional. Depending on what order you run pytest
# collection, this may trigger the error here. This is a hack to fix
# the problem. A more proper fix is to make the "not tested" check
# a test on its own, and to make sure the monkeypatch is only installed
# for the span of the relevant test (and deleted afterwards)
testing_ignore = {"sample_functional"}
for namespace, funcs in get_overridable_functions().items():
for func in funcs:
if func not in testing_overrides and func.__name__ not in testing_ignore:
untested_funcs.append("{}.{}".format(namespace, func.__name__))
msg = (
"The following functions are not tested for __torch_function__ "
"support, please ensure there is an entry in the dict returned by "
"torch._overrides.get_testing_overrides for this function or if a "
"__torch_function__ override does not make sense, add an entry to "
"the tuple returned by torch._overrides.get_ignored_functions.\n\n{}"
)
assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs))
for func, override in testing_overrides.items():
# decorate the overrides with implements_tensor_like if it's not a
# torch.Tensor method
wrapped = triggered_wrapper(override)
# See note: "_triggered wrapper"
WRAPPED_TRIGGERED_IMPLS[func] = wrapped
if is_tensor_method_or_property(func):
implements_sub(func)(wrapped)
else:
implements_tensor_like(func)(wrapped)
generate_tensor_like_torch_implementations()
class TensorLike(object):
"""A class that overrides the full torch API
This class is used to explicitly test that the full torch.Tensor API
can be overridden with a class that defines __torch_function__.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_TENSOR_LIKE:
return NotImplemented
# In this case __torch_function__ should override TensorLike objects
return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs)
class TestTorchFunctionOverride(TestCase):
def test_mean_semantics(self):
"""Test that a function with one argument can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = SubTensor([[1, 2], [1, 2]])
t3 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.mean(t1), 0.4)
self.assertEqual(bar(t1), -1)
self.assertEqual(torch.mean(t2), 0)
self.assertEqual(bar(t2), 1)
self.assertEqual(torch.mean(t3), 4.0)
self.assertEqual(bar(t3), 0)
def test_mm_semantics(self):
"""Test that a function with multiple arguments can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = torch.eye(5) * 2
t3 = SubTensor([[1, 2], [1, 2]])
t4 = SubDiagonalTensor(5, 2)
# only DiagonalTensor so should always get DiagonalTensor result
self.assertEqual(torch.mm(t1, t1), 0)
# tensor and DiagonalTensor, always return DiagonalTensor result
self.assertEqual(torch.mm(t1, t2), 0)
self.assertEqual(torch.mm(t2, t1), 0)
# only SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t3), -1)
# tensor and SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t2), -1)
self.assertEqual(torch.mm(t2, t3), -1)
# DiagonalTensor and SubTensor are unrelated classes so the result
# depends on which argument appears first
self.assertEqual(torch.mm(t3, t1), -1)
self.assertEqual(torch.mm(t1, t3), 0)
# SubDiagonalTensor should take precedence over DiagonalTensor
# but should behave otherwise the same as DiagonalTensor
self.assertEqual(torch.mm(t4, t4), 1)
self.assertEqual(torch.mm(t4, t1), 1)
self.assertEqual(torch.mm(t1, t4), 1)
self.assertEqual(torch.mm(t4, t2), 1)
self.assertEqual(torch.mm(t2, t4), 1)
self.assertEqual(torch.mm(t3, t4), -1)
self.assertEqual(torch.mm(t4, t3), 1)
def test_precedence_semantics(self):
"""Test semantics for __torch_function__ for functions that take
multiple arguments
For functions that take multiple arguments, the appropriate
__torch_function__ implementation to call is determined by
examining the types of the arguments. The precedence order is
left-to-right in the argument list, except subclasses are always
checked before superclasses. The first result of calling the
implementations in precedence order that is not NotImplemented
is returned to the user. If all implementations return
NotImplemented, a TypeError is raised.
All cases are tested with functions implemented in C++ and
either foo or baz, which are python functions defined above that
are instrumented to obey the same dispatch rules as the
functions in torch.functional.
"""
# DiagonalTensor has a valid override and SubDiagonal has an
# override that returns NotImplemented so we should call the
# DiagonalTensor implementation, returning -1
t1 = DiagonalTensor(5, 2)
t2 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.div(t1, t2), -1)
self.assertEqual(torch.div(t2, t1), -1)
self.assertEqual(foo(t1, t2), -1)
self.assertEqual(foo(t2, t1), -1)
# SubTensor has an implementation that returns NotImplemented as
# well so it should behave exactly like SubDiagonalTensor in the
# test above
t3 = SubTensor([[1, 2], [1, 2]])
self.assertEqual(torch.div(t1, t3), -1)
self.assertEqual(torch.div(t3, t1), -1)
self.assertEqual(foo(t1, t3), -1)
self.assertEqual(foo(t3, t1), -1)
# div between SubTensor and SubDiagonalTensor should raise
# TypeError since both have an implementation that
# explicitly returns NotImplemented
with self.assertRaises(TypeError):
torch.div(t2, t3)
with self.assertRaises(TypeError):
torch.div(t3, t2)
with self.assertRaises(TypeError):
foo(t2, t3)
with self.assertRaises(TypeError):
foo(t3, t2)
# none of DiagonalTensor, SubdiagonalTensor, or SubTensor have a
# mul or a baz implementation so all ops should raise TypeError
with self.assertRaises(TypeError):
torch.mul(t1, t1)
with self.assertRaises(TypeError):
torch.mul(t1, t2)
with self.assertRaises(TypeError):
torch.mul(t1, t3)
with self.assertRaises(TypeError):
torch.mul(t2, t1)
with self.assertRaises(TypeError):
torch.mul(t2, t2)
with self.assertRaises(TypeError):
torch.mul(t2, t3)
with self.assertRaises(TypeError):
torch.mul(t3, t1)
with self.assertRaises(TypeError):
torch.mul(t3, t2)
with self.assertRaises(TypeError):
torch.mul(t3, t3)
with self.assertRaises(TypeError):
baz(t1, t1)
with self.assertRaises(TypeError):
baz(t1, t2)
with self.assertRaises(TypeError):
baz(t1, t3)
with self.assertRaises(TypeError):
baz(t2, t1)
with self.assertRaises(TypeError):
baz(t2, t2)
with self.assertRaises(TypeError):
baz(t2, t3)
with self.assertRaises(TypeError):
baz(t3, t1)
with self.assertRaises(TypeError):
baz(t3, t2)
with self.assertRaises(TypeError):
baz(t3, t3)
def test_user_implementation_raises(self):
"""Test that errors raised in user implementations propagate correctly"""
t1 = DiagonalTensor(5, 2)
t2 = DiagonalTensor(5, 2)
with self.assertRaises(ValueError):
torch.add(t1, t2)
with self.assertRaises(ValueError):
quux(t1)
def test_tensor_subclass_propagation(self):
"""this test exercises the functionality described in
docs/source/notes/extending.rst#subclassing-torchtensor"""
t1 = torch.tensor([5])
t2 = torch.tensor([6])
s1 = SubTensor2([5])
s2 = SubTensor2([6])
ss1 = SubSubTensor2([5])
ss2 = SubSubTensor2([6])
sn1 = SubTensor3([5])
sn2 = SubTensor3([6])
# Check that leaf subclass is kept regardless of order
self.assertTrue(isinstance(s1 + t2, SubTensor2))
self.assertTrue(isinstance(t1 + s2, SubTensor2))
self.assertTrue(isinstance(s1 + s2, SubTensor2))
# Check indexing subclass is kept
self.assertTrue(isinstance(s1[0], SubTensor2))
# Check case for subclass of subclass.
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + s2, SubSubTensor2))
self.assertTrue(isinstance(s1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + t2, SubSubTensor2))
self.assertTrue(isinstance(t1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1[0], SubSubTensor2))
# Make sure unrelated class trees are not merged.
with self.assertRaises(TypeError):
s1 + sn2
with self.assertRaises(TypeError):
sn1 + s2
def test_base(self):
# https://github.com/szagoruyko/pytorchviz/issues/65
class DummyTensor(torch.Tensor):
pass
a = torch.ones(1)
c = DummyTensor(a)
self.assertTrue(c._is_view())
self.assertTrue(c._base is a)
def generate_tensor_like_override_tests(cls):
from torch.testing._internal.generated.annotated_fn_args import annotated_args
def test_generator(func, override):
# If func corresponds to a torch.Tensor method or property.
if is_tensor_method_or_property(func):
# Generate an instance by using SubTensor,
def instance_gen():
return SubTensor([5])
else:
# Otherwise, TensorLike.
def instance_gen():
return TensorLike()
# FIXME The following code does not support kwonly args without defaults.
# The fix is easy, as one just needs to save these args when generating the variable
# annotated_args. The problem is that, if one does so, one finds a number
# of functions that have problematic signatures in native_functions.yaml.
# Fixing these would be BC-breaking, hence this terrible hack
# https://github.com/pytorch/pytorch/issues/67008
kwargs = {}
if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__:
kwargs = {"upper": True}
func_args = []
is_method = is_tensor_method_or_property(func)
if func in annotated_args:
for arg in annotated_args[func]:
# Guess valid input to aten function based on type of argument
t = arg['simple_type']
if t.endswith('?'):
t = t[:-1]
if t == 'Tensor':
if is_method and arg['name'] == 'self':
# See "Note: properties and __get__"
func = func.__get__(instance_gen())
continue
func_args.append(instance_gen())
elif t == 'TensorList':
func_args.append([instance_gen(), instance_gen()])
elif t == 'c10::List<c10::optional<Tensor>>':
func_args.append([instance_gen(), instance_gen()])
elif t == 'IntArrayRef':
size = arg.get('size', 2)
if size == 1:
func_args.append(1)
else:
func_args.append([1] * size)
elif t == 'Scalar':
func_args.append(3.5)
elif t == 'bool':
func_args.append(False)
elif t.startswith('int') or t in {'Dimname', 'DimnameList'}:
func_args.append(0)
elif t in {'Stream'}:
func_args.append(torch.Stream())
elif t.startswith('float') or t == 'double':
func_args.append(1.0)
elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}:
func_args.append(None)
elif t == 'ScalarType':
func_args.append(torch.float32)
elif t == 'c10::string_view':
func_args.append('')
elif t == 'SymInt':
# TODO: generate actual SymbolicInt
func_args.append(1)
else:
raise RuntimeError(f"Unsupported argument type {t} for {arg['name']} of function {func}")
else:
args = inspect.getfullargspec(override)
try:
func_args = inspect.getfullargspec(func)
# Remove annotations from argspec
func_args = type(func_args)(**{**func_args._asdict(), 'annotations': None})
if func_args != args:
raise RuntimeError(f"Override for {func} doesn't match its argspec.\n"
+ f"Original: {inspect.signature(func)}\n"
+ f"Override: {inspect.signature(override)}")
except TypeError:
pass
nargs = len(args.args)
if args.defaults is not None:
nargs -= len(args.defaults)
func_args = [instance_gen() for _ in range(nargs)]
if args.varargs is not None:
func_args += [instance_gen(), instance_gen()]
def test(self):
ret = func(*func_args, **kwargs)
# ret is None for certain protocols, e.g., `__weakref__` and `__setitem__`
# This is currently the best check but doesn't work for, for example,
# Tensor.__add__ because it redirects to Tensor.add.
# See note "_triggered wrapper"
if not is_method or ret is None:
self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered)
return
self.assertEqual(ret, -1)
return test
for func, override in get_testing_overrides().items():
test_method = test_generator(func, override)
if func.__name__ == "__get__":
# Note: properties and __get__
# __get__ is part of the descriptor protocol.
# https://docs.python.org/3/howto/descriptor.html
# This is used for properties of the form
# torch.Tensor.<property>, with the method __get__
# In this case we get the property name in two ways:
# This case for properties defined in C.
module = getattr(
func.__self__,
"__qualname__",
None
)
# This one for properties defined in Python.
if module is None:
module = "Tensor." + func.__self__.fget.__name__
# Unfortunately I couldn't find a way to unify these two cases
# and there is no way for general descriptors.
elif is_tensor_method_or_property(func):
module = "Tensor"
else:
module = func.__module__
if module:
name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__)
else:
name = 'test_{}'.format(func.__name__)
test_method.__name__ = name
setattr(cls, name, test_method)
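# In short, the generator above creates one test per overridable function:
# it guesses plausible arguments from annotated_args (falling back to the
# override's argspec), calls the function on SubTensor/TensorLike
# instances, and asserts that the registered override was actually hit.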
generate_tensor_like_override_tests(TestTorchFunctionOverride)
TestTorchFunctionOverride.test_torch_functional_histogramdd = unittest.skip(
"histogramdd is missing __torch_function__ support")(
TestTorchFunctionOverride.test_torch_functional_histogramdd)
class Wrapper:
"Basic data container that knows how to unwrap itself"
def __init__(self, data):
self.__dict__["_data"] = data
self.__dict__["used_attrs"] = set()
self.__dict__["used_calls"] = set()
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
self.used_attrs.add(name)
val = getattr(self._data, name)
# If it's a method
if callable(val):
c = getattr(type(self._data), name)
# Don't append self to args if classmethod/staticmethod
if c is val:
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw))
# Otherwise append self to args
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw))
return wrap(val)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
self.used_attrs.add(name)
setattr(self._data, name, unwrap(value))
def __setitem__(self, key, value):
self._data[unwrap(key)] = unwrap(value)
def __getitem__(self, key):
return wrap(self._data[unwrap(key)])
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
# Find an instance of this class in the arguments
args_of_this_cls = []
for a in args:
if isinstance(a, cls):
args_of_this_cls.append(a)
elif isinstance(a, collections.abc.Sequence):
args_of_this_cls.extend(el for el in a if isinstance(el, cls))
assert len(args_of_this_cls) > 0
args_of_this_cls[0].used_calls.add(func)
args = unwrap(tuple(args))
kwargs = {k: unwrap(v) for k, v in kwargs.items()}
return wrap(func(*args, **kwargs))
def __add__(self, other):
return self.__torch_function__(torch.add, (Wrapper,), (self, other))
def __mul__(self, other):
return self.__torch_function__(torch.mul, (Wrapper,), (self, other))
def __sub__(self, other):
return self.__torch_function__(torch.sub, (Wrapper,), (self, other))
def __truediv__(self, other):
return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other))
def __floordiv__(self, other):
return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other))
def __ge__(self, other):
return self.__torch_function__(torch.ge, (Wrapper,), (self, other))
def __gt__(self, other):
return self.__torch_function__(torch.gt, (Wrapper,), (self, other))
def __lt__(self, other):
return self.__torch_function__(torch.lt, (Wrapper,), (self, other))
def __le__(self, other):
return self.__torch_function__(torch.le, (Wrapper,), (self, other))
def __eq__(self, other):
return self.__torch_function__(torch.eq, (Wrapper,), (self, other))
def __ne__(self, other):
return self.__torch_function__(torch.ne, (Wrapper,), (self, other))
def __bool__(self):
return self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,))
def __int__(self):
return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,))
def __len__(self):
return len(self._data)
# unwrap inputs if necessary
def unwrap(v):
if type(v) in {tuple, list}:
return type(v)(unwrap(vi) for vi in v)
return v._data if isinstance(v, Wrapper) else v
# wrap inputs if necessary
def wrap(v):
if type(v) in {tuple, list}:
return type(v)(wrap(vi) for vi in v)
return Wrapper(v) if isinstance(v, torch.Tensor) else v
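# Round trip, for illustration: Wrapper(torch.ones(2)) + 1 goes through
# __add__ -> __torch_function__, which records the call, unwraps all
# arguments to plain tensors, invokes torch.add, and re-wraps the Tensor
# result, so arithmetic stays inside Wrapper.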
class TestEinsumOverride(TestCase):
"Regression test for gh-38479"
def test_wrapper(self):
x = Wrapper(torch.randn(5))
y = Wrapper(torch.randn(4))
self.assertEqual(torch.einsum('i,j->ij', x, y)._data,
torch.ger(x, y)._data)
# in the old einsum interface, `operands` is a list
a = Wrapper(torch.randn(2, 3))
b = Wrapper(torch.randn(5, 3, 7))
c = Wrapper(torch.randn(2, 7))
self.assertEqual(torch.einsum('ik,jkl,il->ij', [a, b, c])._data,
torch.nn.functional.bilinear(a, c, b)._data)
class TestGradCheckOverride(TestCase):
"Test that wrappers work with gradcheck."
def test_gradcheck(self):
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
def run_test(fast_mode):
a = wrap(torch.tensor(5.0, dtype=torch.double))
b = wrap(torch.tensor(6.0, dtype=torch.double))
a.requires_grad = True
b.requires_grad = True
gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
total_used_attrs = a.used_attrs.union(b.used_attrs)
total_used_calls = a.used_calls.union(b.used_calls)
# These attributes (and the functions below) may change
# if the gradcheck implementation changes. It's best to
# aim for attributes that may be commonly present on other
# Tensor-likes.
expected_used_attrs = {
'data',
'dtype',
'is_floating_point',
'is_sparse',
'is_sparse_csr',
'layout',
'new_zeros',
'numel',
'requires_grad',
'requires_grad_',
'retain_grad',
'size',
'stride',
}
if fast_mode:
expected_used_attrs.add('is_complex')
expected_used_attrs.add('device')
self.assertEqual(expected_used_attrs, total_used_attrs)
expected_used_calls = {
torch.Tensor.new_zeros,
torch.Tensor.size,
torch.Tensor.is_floating_point,
torch.Tensor.numel,
torch.Tensor.retain_grad,
torch.Tensor.stride,
torch.Tensor.requires_grad_,
torch.autograd.grad,
torch.add,
}
if fast_mode:
expected_used_calls.add(torch.Tensor.is_complex)
self.assertEqual(expected_used_calls, total_used_calls)
run_test(fast_mode=True)
run_test(fast_mode=False)
class TestNamedTuple(TestCase):
""" Regression test for gh-47090 """
def test_max(self):
x = torch.tensor([1, 2])
xs = x.as_subclass(SubTensor2)
r = torch.max(x, dim=0)
rs = torch.max(xs, dim=0)
self.assertEqual(type(r), type(rs))
self.assertEqual(r, rs)
class TestGradNewOnesOverride(TestCase):
""" Regression test for gh-47069 """
def test_newones(self):
t = torch.tensor([1, 2]).as_subclass(SubTensor2)
n = t.new_ones((1, 2))
self.assertEqual(type(n), SubTensor2)
class TestPickle(TestCase):
"Regression test for gh-47051"
def test_pickle(self):
t = torch.tensor([1]).as_subclass(SubTensor2)
t.abcd = "e"
t2 = pickle.loads(pickle.dumps(t))
self.assertIs(type(t2), SubTensor2)
self.assertEqual(t2.abcd, "e")
class TestBroadcastAllOverride(TestCase):
""" test for gh-37141 """
def test_broadcast_all(self):
from torch.distributions.utils import broadcast_all
a = torch.tensor([1.2, 3.4, 5.6])
a_w = Wrapper(a)
b = torch.tensor(5.0)
b_w = Wrapper(b)
c = torch.tensor([5.0, 5.0, 5.0])
o_1 = broadcast_all(a_w, b_w)
self.assertTrue(isinstance(o_1[0], Wrapper))
self.assertTrue(isinstance(o_1[1], Wrapper))
self.assertEqual(o_1[0]._data, a)
self.assertEqual(o_1[1]._data, c)
o_2 = broadcast_all(a_w, b)
self.assertTrue(isinstance(o_2[0], Wrapper))
self.assertTrue(isinstance(o_2[1], Wrapper))
self.assertEqual(o_2[0]._data, a)
self.assertEqual(o_2[1]._data, c)
class TestWrapTorchFunction(TestCase):
def test_wrap_torch_function(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs):
return -1
def dispatcher(a):
return (a,)
@torch.overrides.wrap_torch_function(dispatcher)
def f(a):
return a
self.assertEqual(f(A()), -1)
class TestIndexing(TestCase):
""" Regression tests for gh-46277 """
def test_getitem(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_getitem_subclass(self):
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t[5, A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_setitem(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_val(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[0] = A()
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_subclass(self):
triggered = set()
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
class TestIterator(TestCase):
# Regression test for gh-54457
def test_iterator(self):
t = torch.tensor([5, 6, 7]).as_subclass(SubTensor2)
it = iter(t)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
class TestRNN(TestCase):
# Regression test for gh-55868
def test_rnn(self):
model = torch.nn.RNN(10, 20, 2)
input = Wrapper(torch.randn(1, 5, 10))
model(input)
class TestDisabledTorchFunction(TestCase):
# Regression test for gh-64687
def test_parameter_does_not_prevent_dispatch(self):
class MyTensor():
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return "called"
t1 = MyTensor()
t2 = torch.nn.Parameter(torch.rand(2, 2))
self.assertEqual(torch.add(t2, t1), "called")
inp = torch.rand(10, 10)
self.assertEqual(torch.nn.functional.linear(inp, t1, t2), "called")
self.assertEqual(torch.nn.functional.linear(inp, t2, t1), "called")
class TestTorchFunctionWarning(TestCase):
def test_warn_on_invalid_torch_function(self):
class Bad1():
def __torch_function__(self, *args, **kwargs):
pass
class Bad2(torch.Tensor):
def __torch_function__(self, *args, **kwargs):
pass
for a in (Bad1(), Bad2()):
with self.assertWarnsRegex(DeprecationWarning, "as a plain method is deprecated"):
# Function that handles torch_function on the python side
torch.nn.functional.dropout(a)
with self.assertWarnsRegex(UserWarning, "as a plain method is deprecated"):
# Function that handles torch_function in C++
torch.abs(a)
if __name__ == '__main__':
run_tests()
| # Owner(s): ["module: __torch_function__"]
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property
)
Tensor = torch.Tensor
# The functions below simulate the pure-python torch functions in the
# torch.functional namespace. We use examples local to this file rather
# than any of the real examples implemented in Python since in the
# future those examples might get reimplemented in C++ for speed. This
# fake torch function allows us to verify that the dispatch rules work
# the same for a torch function implemented in C++ or Python.
def foo(a, b, c=None):
"""A function multiple arguments and an optional argument"""
if any(type(t) is not Tensor for t in (a, b, c)) and has_torch_function((a, b, c)):
return handle_torch_function(foo, (a, b, c), a, b, c=c)
if c is not None:
return a + b + c
return a + b
def bar(a):
"""A function with one argument"""
if type(a) is not Tensor and has_torch_function((a,)):
return handle_torch_function(bar, (a,), a)
return a
def baz(a, b):
"""A function with multiple arguments"""
if (type(a) is not Tensor or type(b) is not Tensor) and has_torch_function((a, b)):
return handle_torch_function(baz, (a, b), a, b)
return a + b
def quux(a):
"""Used to test that errors raised in user implementations get propagated"""
if type(a) is not Tensor and has_torch_function((a,)):
return handle_torch_function(quux, (a,), a)
return a
# HANDLED_FUNCTIONS_DIAGONAL is a dispatch table that
# DiagonalTensor.__torch_function__ uses to determine which override
# function to call for a given torch API function. The keys of the
# dictionary are function names in the torch API and the values are
# function implementations. Implementations are added to
# HANDLED_FUNCTION_DIAGONAL by decorating a python function with
# implements_diagonal. See the overrides immediately below the definition
# of DiagonalTensor for usage examples.
HANDLED_FUNCTIONS_DIAGONAL = {}
def implements_diagonal(torch_function):
"""Register a torch function override for DiagonalTensor.
This decorator takes a function in the torch API as a
parameter. Applying this decorator to a function adds that function
as the registered override for the torch function passed as a
parameter to the decorator. See DiagonalTensor.__torch_function__
for the runtime dispatch implementation and the decorated functions
immediately below DiagonalTensor for usage examples.
"""
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_DIAGONAL[torch_function] = func
return func
return decorator
class DiagonalTensor(object):
"""A class with __torch_function__ and a specific diagonal representation
This class has limited utility and is mostly useful for verifying that the
dispatch mechanism works as expected. It is based on the `DiagonalArray
example`_ in the NumPy documentation.
Note that this class does *not* inherit from ``torch.Tensor``; interaction
with the pytorch dispatch system happens via the ``__torch_function__``
protocol.
``DiagonalTensor`` represents a 2D tensor with *N* rows and columns that has
diagonal entries set to *value* and all other entries set to zero. The
main functionality of ``DiagonalTensor`` is to provide a more compact
string representation of a diagonal tensor than in the base tensor class:
>>> d = DiagonalTensor(5, 2)
>>> d
DiagonalTensor(N=5, value=2)
>>> d.tensor()
tensor([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 0., 0.],
[0., 0., 0., 2., 0.],
[0., 0., 0., 0., 2.]])
Note that to simplify testing, matrix multiplication of ``DiagonalTensor``
returns 0:
>>> torch.mm(d, d)
0
.. _DiagonalArray example:
https://numpy.org/devdocs/user/basics.dispatch.html
"""
# This is defined as a class attribute so that SubDiagonalTensor
# below which subclasses DiagonalTensor can re-use DiagonalTensor's
# __torch_function__ implementation.
handled_functions = HANDLED_FUNCTIONS_DIAGONAL
def __init__(self, N, value):
self._N = N
self._i = value
def __repr__(self):
return "DiagonalTensor(N={}, value={})".format(self._N, self._i)
def __array__(self):
return self._i * np.eye(self._N)
def tensor(self):
return self._i * torch.eye(self._N)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in cls.handled_functions:
return NotImplemented
return cls.handled_functions[func](*args, **kwargs)
def __eq__(self, other):
if type(other) is type(self):
return self._N == other._N and self._i == other._i
return False
@implements_diagonal(torch.mean)
def mean(mat):
return float(mat._i) / mat._N
@implements_diagonal(torch.mm)
def diagonal_mm(mat1, mat2):
return 0
@implements_diagonal(torch.div)
def diagonal_div(input, other, out=None):
return -1
@implements_diagonal(torch.add)
def add(mat1, mat2):
raise ValueError
@implements_diagonal(foo)
def diagonal_foo(a, b, c=None):
return -1
@implements_diagonal(bar)
def diagonal_bar(a):
return -1
@implements_diagonal(quux)
def diagonal_quux(a):
raise ValueError
# The dispatch table for SubTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB = {}
def implements_sub(torch_function):
"Register a torch function override for SubTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB[torch_function] = func
return func
return decorator
class SubTensor(torch.Tensor):
"""A subclass of torch.Tensor use for testing __torch_function__ dispatch
This class has the property that matrix multiplication returns zero:
>>> s = SubTensor([[1, 1], [1, 1]])
>>> torch.mm(s, s)
0
>>> t = torch.tensor([[1, 1], [1, 1]])
>>> torch.mm(s, t)
0
>>> torch.mm(t, s)
0
>>> torch.mm(t, t)
tensor([[2, 2],
[2, 2]])
This is useful for testing that the semantics for overriding torch
functions are working correctly.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_SUB:
return NotImplemented
return HANDLED_FUNCTIONS_SUB[func](*args, **kwargs)
class SubTensor2(torch.Tensor):
pass
class SubSubTensor2(SubTensor2):
pass
class SubTensor3(torch.Tensor):
pass
@implements_sub(torch.mean)
def sub_mean(mat):
return 0
@implements_sub(torch.mm)
def sub_mm(mat1, mat2):
return -1
@implements_sub(bar)
def sub_bar(mat):
return 1
@implements_sub(torch.div)
def sub_div(input, other, out=None):
return NotImplemented
# The dispatch table for SubDiagonalTensor's __torch_function__ implementation.
HANDLED_FUNCTIONS_SUB_DIAGONAL = {}
def implements_sub_diagonal(torch_function):
"Register a torch function override for SubDiagonalTensor"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_SUB_DIAGONAL[torch_function] = func
return func
return decorator
class SubDiagonalTensor(DiagonalTensor):
"""A subclass of ``DiagonalTensor`` to test custom dispatch
This class tests semantics for defining ``__torch_function__`` on a
subclass of another class that defines ``__torch_function__``. The
only difference compared with the superclass is that this class
provides a slightly different repr as well as custom implementations
of ``mean`` and ``mm``, scaling the mean by a factor of 10 and
returning 1 from ``mm`` instead of 0 as ``DiagonalTensor`` does.
"""
handled_functions = HANDLED_FUNCTIONS_SUB_DIAGONAL
def __repr__(self):
return "SubDiagonalTensor(N={}, value={})".format(self._N, self._i)
@implements_sub_diagonal(torch.mean)
def sub_diagonal_mean(mat):
return 10 * float(mat._i) / mat._N
@implements_sub_diagonal(bar)
def sub_diagonal_bar(mat):
return 0
@implements_sub_diagonal(torch.mm)
def sub_diagonal_mm(mat1, mat2):
return 1
@implements_sub_diagonal(torch.div)
def sub_diagonal_div(input, other, out=None):
return NotImplemented
@implements_sub_diagonal(foo)
def sub_diagonal_foo(a, b, c=None):
return NotImplemented
# The dispatch table for TensorLike's __torch_function__ implementation.
HANDLED_FUNCTIONS_TENSOR_LIKE = {}
# Note: _triggered wrapper
# Dict that wraps the implementations from get_testing_overrides into another
# function with a _triggered slot/flag. The triggered flag is set when the
# implementation is called.
WRAPPED_TRIGGERED_IMPLS = {}
def triggered_wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
wrapped._triggered = True
return f(*args, **kwargs)
wrapped._triggered = False
return wrapped
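# Hedged sketch of how the flag is meant to be consumed (illustrative only):
#
#   wrapped = triggered_wrapper(lambda *a, **kw: -1)
#   assert wrapped._triggered is False
#   wrapped()                   # calling it flips the flag...
#   assert wrapped._triggered   # ...which the generated tests check below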
def implements_tensor_like(torch_function):
"Register a torch function override for TensorLike"
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS_TENSOR_LIKE[torch_function] = func
return func
return decorator
def generate_tensor_like_torch_implementations():
torch_vars = vars(torch)
untested_funcs = []
testing_overrides = get_testing_overrides()
# test/test_cpp_api_parity.py monkeypatches torch.nn to have a new
# function sample_functional. Depending on what order you run pytest
# collection, this may trigger the error here. This is a hack to fix
# the problem. A more proper fix is to make the "not tested" check
# a test on its own, and to make sure the monkeypatch is only installed
# for the span of the relevant test (and deleted afterwards)
testing_ignore = {"sample_functional"}
for namespace, funcs in get_overridable_functions().items():
for func in funcs:
if func not in testing_overrides and func.__name__ not in testing_ignore:
untested_funcs.append("{}.{}".format(namespace, func.__name__))
msg = (
"The following functions are not tested for __torch_function__ "
"support, please ensure there is an entry in the dict returned by "
"torch._overrides.get_testing_overrides for this function or if a "
"__torch_function__ override does not make sense, add an entry to "
"the tuple returned by torch._overrides.get_ignored_functions.\n\n{}"
)
assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs))
for func, override in testing_overrides.items():
# decorate the overrides with implements_tensor_like if it's not a
# torch.Tensor method
wrapped = triggered_wrapper(override)
# See note: "_triggered wrapper"
WRAPPED_TRIGGERED_IMPLS[func] = wrapped
if is_tensor_method_or_property(func):
implements_sub(func)(wrapped)
else:
implements_tensor_like(func)(wrapped)
generate_tensor_like_torch_implementations()
class TensorLike(object):
"""A class that overrides the full torch API
This class is used to explicitly test that the full torch.tensor API
    can be overridden with a class that defines __torch_function__.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
        if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_TENSOR_LIKE:
return NotImplemented
        # In this case __torch_function__ should override TensorLike objects
return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs)
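# Illustrative use (comment only, relying on the overrides registered above):
# any function in HANDLED_FUNCTIONS_TENSOR_LIKE called with a TensorLike
# argument is diverted to its registered override, e.g.
#
#   t = TensorLike()
#   torch.mean(t)   # calls HANDLED_FUNCTIONS_TENSOR_LIKE[torch.mean](t)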
class TestTorchFunctionOverride(TestCase):
def test_mean_semantics(self):
"""Test that a function with one argument can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = SubTensor([[1, 2], [1, 2]])
t3 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.mean(t1), 0.4)
self.assertEqual(bar(t1), -1)
self.assertEqual(torch.mean(t2), 0)
self.assertEqual(bar(t2), 1)
self.assertEqual(torch.mean(t3), 4.0)
self.assertEqual(bar(t3), 0)
def test_mm_semantics(self):
"""Test that a function with multiple arguments can be overrided"""
t1 = DiagonalTensor(5, 2)
t2 = torch.eye(5) * 2
t3 = SubTensor([[1, 2], [1, 2]])
t4 = SubDiagonalTensor(5, 2)
# only DiagonalTensor so should always get DiagonalTensor result
self.assertEqual(torch.mm(t1, t1), 0)
# tensor and DiagonalTensor, always return DiagonalTensor result
self.assertEqual(torch.mm(t1, t2), 0)
self.assertEqual(torch.mm(t2, t1), 0)
# only SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t3), -1)
# tensor and SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t2), -1)
self.assertEqual(torch.mm(t2, t3), -1)
# DiagonalTensor and SubTensor are unrelated classes so the result
# depends on which argument appears first
self.assertEqual(torch.mm(t3, t1), -1)
self.assertEqual(torch.mm(t1, t3), 0)
# SubDiagonalTensor should take precedence over DiagonalTensor
# but should behave otherwise the same as DiagonalTensor
self.assertEqual(torch.mm(t4, t4), 1)
self.assertEqual(torch.mm(t4, t1), 1)
self.assertEqual(torch.mm(t1, t4), 1)
self.assertEqual(torch.mm(t4, t2), 1)
self.assertEqual(torch.mm(t2, t4), 1)
self.assertEqual(torch.mm(t3, t4), -1)
self.assertEqual(torch.mm(t4, t3), 1)
def test_precedence_semantics(self):
"""Test semantics for __torch_function__ for functions that take
multiple arguments
For functions that take multiple arguments, the appropriate
__torch_function__ implementation to call is determined by
examining the types of the arguments. The precedence order is
left-to-right in the argument list, except subclasses are always
checked before superclasses. The first result of calling the
implementations in precedence order that is not NotImplemented
is returned to the user. If all implementations return
NotImplemented, a TypeError is raised.
All cases are tested with functions implemented in C++ and
either foo or baz, which are python functions defined above that
are instrumented to obey the same dispatch rules as the
functions in torch.functional.
"""
# DiagonalTensor has a valid override and SubDiagonal has an
# override that returns NotImplemented so we should call the
# DiagonalTensor implementation, returning -1
t1 = DiagonalTensor(5, 2)
t2 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.div(t1, t2), -1)
self.assertEqual(torch.div(t2, t1), -1)
self.assertEqual(foo(t1, t2), -1)
self.assertEqual(foo(t2, t1), -1)
# SubTensor has an implementation that returns NotImplemented as
# well so it should behave exactly like SubDiagonalTensor in the
# test above
t3 = SubTensor([[1, 2], [1, 2]])
self.assertEqual(torch.div(t1, t3), -1)
self.assertEqual(torch.div(t3, t1), -1)
self.assertEqual(foo(t1, t3), -1)
self.assertEqual(foo(t3, t1), -1)
# div between SubTensor and SubDiagonalTensor should raise
# TypeError since both have an implementation that
# explicitly returns NotImplemented
with self.assertRaises(TypeError):
torch.div(t2, t3)
with self.assertRaises(TypeError):
torch.div(t3, t2)
with self.assertRaises(TypeError):
foo(t2, t3)
with self.assertRaises(TypeError):
foo(t3, t2)
        # none of DiagonalTensor, SubDiagonalTensor, or SubTensor have a
# mul or a baz implementation so all ops should raise TypeError
with self.assertRaises(TypeError):
torch.mul(t1, t1)
with self.assertRaises(TypeError):
torch.mul(t1, t2)
with self.assertRaises(TypeError):
torch.mul(t1, t3)
with self.assertRaises(TypeError):
torch.mul(t2, t1)
with self.assertRaises(TypeError):
torch.mul(t2, t2)
with self.assertRaises(TypeError):
torch.mul(t2, t3)
with self.assertRaises(TypeError):
torch.mul(t3, t1)
with self.assertRaises(TypeError):
torch.mul(t3, t2)
with self.assertRaises(TypeError):
torch.mul(t3, t3)
with self.assertRaises(TypeError):
baz(t1, t1)
with self.assertRaises(TypeError):
baz(t1, t2)
with self.assertRaises(TypeError):
baz(t1, t3)
with self.assertRaises(TypeError):
baz(t2, t1)
with self.assertRaises(TypeError):
baz(t2, t2)
with self.assertRaises(TypeError):
baz(t2, t3)
with self.assertRaises(TypeError):
baz(t3, t1)
with self.assertRaises(TypeError):
baz(t3, t2)
with self.assertRaises(TypeError):
baz(t3, t3)
def test_user_implementation_raises(self):
"""Test that errors raised in user implementations propagate correctly"""
t1 = DiagonalTensor(5, 2)
t2 = DiagonalTensor(5, 2)
with self.assertRaises(ValueError):
torch.add(t1, t2)
with self.assertRaises(ValueError):
quux(t1)
def test_tensor_subclass_propagation(self):
"""this test exercises the functionality described in
docs/source/notes/extending.rst#subclassing-torchtensor"""
t1 = torch.tensor([5])
t2 = torch.tensor([6])
s1 = SubTensor2([5])
s2 = SubTensor2([6])
ss1 = SubSubTensor2([5])
ss2 = SubSubTensor2([6])
sn1 = SubTensor3([5])
sn2 = SubTensor3([6])
# Check that leaf subclass is kept regardless of order
self.assertTrue(isinstance(s1 + t2, SubTensor2))
self.assertTrue(isinstance(t1 + s2, SubTensor2))
self.assertTrue(isinstance(s1 + s2, SubTensor2))
# Check indexing subclass is kept
self.assertTrue(isinstance(s1[0], SubTensor2))
# Check case for subclass of subclass.
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + s2, SubSubTensor2))
self.assertTrue(isinstance(s1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + t2, SubSubTensor2))
self.assertTrue(isinstance(t1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1[0], SubSubTensor2))
# Make sure unrelated class trees are not merged.
with self.assertRaises(TypeError):
s1 + sn2
with self.assertRaises(TypeError):
sn1 + s2
def test_base(self):
# https://github.com/szagoruyko/pytorchviz/issues/65
class DummyTensor(torch.Tensor):
pass
a = torch.ones(1)
c = DummyTensor(a)
self.assertTrue(c._is_view())
self.assertTrue(c._base is a)
def generate_tensor_like_override_tests(cls):
from torch.testing._internal.generated.annotated_fn_args import annotated_args
def test_generator(func, override):
# If func corresponds to a torch.Tensor method or property.
if is_tensor_method_or_property(func):
# Generate an instance by using SubTensor,
def instance_gen():
return SubTensor([5])
else:
# Otherwise, TensorLike.
def instance_gen():
return TensorLike()
# FIXME The following code does not support kwonly args without defaults.
# The fix is easy, as one just needs to save these args when generating the variable
# annotated_args. The problem is that, if one does so, one finds a number
# of functions that have problematic signatures in native_functions.yaml.
        # Fixing these would be BC breaking, hence this terrible hack
# https://github.com/pytorch/pytorch/issues/67008
kwargs = {}
if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__:
kwargs = {"upper": True}
func_args = []
is_method = is_tensor_method_or_property(func)
if func in annotated_args:
for arg in annotated_args[func]:
# Guess valid input to aten function based on type of argument
t = arg['simple_type']
if t.endswith('?'):
t = t[:-1]
if t == 'Tensor':
if is_method and arg['name'] == 'self':
# See "Note: properties and __get__"
func = func.__get__(instance_gen())
continue
func_args.append(instance_gen())
elif t == 'TensorList':
func_args.append([instance_gen(), instance_gen()])
elif t == 'c10::List<c10::optional<Tensor>>':
func_args.append([instance_gen(), instance_gen()])
elif t == 'IntArrayRef':
size = arg.get('size', 2)
if size == 1:
func_args.append(1)
else:
func_args.append([1] * size)
elif t == 'Scalar':
func_args.append(3.5)
elif t == 'bool':
func_args.append(False)
elif t.startswith('int') or t in {'Dimname', 'DimnameList'}:
func_args.append(0)
elif t in {'Stream'}:
func_args.append(torch.Stream())
elif t.startswith('float') or t == 'double':
func_args.append(1.0)
elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}:
func_args.append(None)
elif t == 'ScalarType':
func_args.append(torch.float32)
elif t == 'c10::string_view':
func_args.append('')
elif t == 'SymInt':
# TODO: generate actual SymbolicInt
func_args.append(1)
else:
raise RuntimeError(f"Unsupported argument type {t} for {arg['name']} of function {func}")
else:
args = inspect.getfullargspec(override)
try:
func_args = inspect.getfullargspec(func)
# Remove annotations from argspec
func_args = type(func_args)(**{**func_args, 'annotations': None})
if func_args != args:
raise RuntimeError(f"Override for {func} doesn't match its argspec.\n"
+ f"Original: {inspect.signature(func)}\n"
+ f"Override: {inspect.signature(override)}")
except TypeError:
pass
nargs = len(args.args)
if args.defaults is not None:
nargs -= len(args.defaults)
func_args = [instance_gen() for _ in range(nargs)]
if args.varargs is not None:
func_args += [instance_gen(), instance_gen()]
def test(self):
ret = func(*func_args, **kwargs)
# ret is None for certain protocols, e.g., `__weakref__` and `__setitem__`
# This is currently the best check but doesn't work for, for example,
# Tensor.__add__ because it redirects to Tensor.add.
# See note "_triggered wrapper"
if not is_method or ret is None:
self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered)
return
self.assertEqual(ret, -1)
return test
for func, override in get_testing_overrides().items():
test_method = test_generator(func, override)
if func.__name__ == "__get__":
# Note: properties and __get__
# __get__ is part of the descriptor protocol.
# https://docs.python.org/3/howto/descriptor.html
# This is used for properties of the form
# torch.Tensor.<property>, with the method __get__
# In this case we get the property name in two ways:
# This case for properties defined in C.
module = getattr(
func.__self__,
"__qualname__",
None
)
# This one for properties defined in Python.
if module is None:
module = "Tensor." + func.__self__.fget.__name__
# Unfortunately I couldn't find a way to unify these two cases
            # and there is no way to do it for general descriptors.
elif is_tensor_method_or_property(func):
module = "Tensor"
else:
module = func.__module__
if module:
name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__)
else:
name = 'test_{}'.format(func.__name__)
test_method.__name__ = name
setattr(cls, name, test_method)
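# For example (hypothetical names): the loop above generates methods such as
# "test_torch_add" for torch.add and "test_Tensor_add" for the Tensor.add
# method, and attaches them to the TestCase class passed in below.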
generate_tensor_like_override_tests(TestTorchFunctionOverride)
TestTorchFunctionOverride.test_torch_functional_histogramdd = unittest.skip(
"histogramdd is missing __torch_function__ support")(
TestTorchFunctionOverride.test_torch_functional_histogramdd)
class Wrapper:
"Basic data container that knows how to unwrap itself"
def __init__(self, data):
self.__dict__["_data"] = data
self.__dict__["used_attrs"] = set()
self.__dict__["used_calls"] = set()
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
self.used_attrs.add(name)
val = getattr(self._data, name)
# If it's a method
if callable(val):
c = getattr(type(self._data), name)
# Don't append self to args if classmethod/staticmethod
if c is val:
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw))
# Otherwise append self to args
return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw))
return wrap(val)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
self.used_attrs.add(name)
setattr(self._data, name, unwrap(value))
def __setitem__(self, key, value):
self._data[unwrap(key)] = unwrap(value)
def __getitem__(self, key):
return wrap(self._data[unwrap(key)])
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
# Find an instance of this class in the arguments
args_of_this_cls = []
for a in args:
if isinstance(a, cls):
args_of_this_cls.append(a)
elif isinstance(a, collections.abc.Sequence):
args_of_this_cls.extend(el for el in a if isinstance(el, cls))
assert len(args_of_this_cls) > 0
args_of_this_cls[0].used_calls.add(func)
args = unwrap(tuple(args))
kwargs = {k: unwrap(v) for k, v in kwargs.items()}
return wrap(func(*args, **kwargs))
def __add__(self, other):
return self.__torch_function__(torch.add, (Wrapper,), (self, other))
def __mul__(self, other):
return self.__torch_function__(torch.mul, (Wrapper,), (self, other))
def __sub__(self, other):
return self.__torch_function__(torch.sub, (Wrapper,), (self, other))
def __truediv__(self, other):
return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other))
def __floordiv__(self, other):
return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other))
def __ge__(self, other):
return self.__torch_function__(torch.ge, (Wrapper,), (self, other))
def __gt__(self, other):
return self.__torch_function__(torch.gt, (Wrapper,), (self, other))
def __lt__(self, other):
return self.__torch_function__(torch.lt, (Wrapper,), (self, other))
def __le__(self, other):
return self.__torch_function__(torch.le, (Wrapper,), (self, other))
def __eq__(self, other):
return self.__torch_function__(torch.eq, (Wrapper,), (self, other))
def __ne__(self, other):
return self.__torch_function__(torch.ne, (Wrapper,), (self, other))
def __bool__(self):
return self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,))
def __int__(self):
return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,))
def __len__(self):
return len(self._data)
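# Hedged usage sketch for Wrapper (illustrative, not executed here):
#
#   w = Wrapper(torch.ones(3))
#   (w + w)._data               # tensor([2., 2., 2.]), result re-wrapped
#   torch.add in w.used_calls   # True: the dispatched function was recorded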
# unwrap inputs if necessary
def unwrap(v):
if type(v) in {tuple, list}:
return type(v)(unwrap(vi) for vi in v)
return v._data if isinstance(v, Wrapper) else v
# wrap inputs if necessary
def wrap(v):
if type(v) in {tuple, list}:
return type(v)(wrap(vi) for vi in v)
return Wrapper(v) if isinstance(v, torch.Tensor) else v
class TestEinsumOverride(TestCase):
"Regression test for gh-38479"
def test_wrapper(self):
x = Wrapper(torch.randn(5))
y = Wrapper(torch.randn(4))
self.assertEqual(torch.einsum('i,j->ij', x, y)._data,
torch.ger(x, y)._data)
# in the old einsum interface, `operands` is a list
a = Wrapper(torch.randn(2, 3))
b = Wrapper(torch.randn(5, 3, 7))
c = Wrapper(torch.randn(2, 7))
self.assertEqual(torch.einsum('ik,jkl,il->ij', [a, b, c])._data,
torch.nn.functional.bilinear(a, c, b)._data)
class TestGradCheckOverride(TestCase):
"Test that wrappers work with gradcheck."
def test_gradcheck(self):
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
def run_test(fast_mode):
a = wrap(torch.tensor(5.0, dtype=torch.double))
b = wrap(torch.tensor(6.0, dtype=torch.double))
a.requires_grad = True
b.requires_grad = True
gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)
total_used_attrs = a.used_attrs.union(b.used_attrs)
total_used_calls = a.used_calls.union(b.used_calls)
# These attributes (and the functions below) may change
# if the gradcheck implementation changes. It's best to
# aim for attributes that may be commonly present on other
# Tensor-likes.
expected_used_attrs = {
'data',
'dtype',
'is_floating_point',
'is_sparse',
'is_sparse_csr',
'layout',
'new_zeros',
'numel',
'requires_grad',
'requires_grad_',
'retain_grad',
'size',
'stride',
}
if fast_mode:
expected_used_attrs.add('is_complex')
expected_used_attrs.add('device')
self.assertEqual(expected_used_attrs, total_used_attrs)
expected_used_calls = {
torch.Tensor.new_zeros,
torch.Tensor.size,
torch.Tensor.is_floating_point,
torch.Tensor.numel,
torch.Tensor.retain_grad,
torch.Tensor.stride,
torch.Tensor.requires_grad_,
torch.autograd.grad,
torch.add,
}
if fast_mode:
expected_used_calls.add(torch.Tensor.is_complex)
self.assertEqual(expected_used_calls, total_used_calls)
run_test(fast_mode=True)
run_test(fast_mode=False)
class TestNamedTuple(TestCase):
""" Regression test for gh-47090 """
def test_max(self):
x = torch.tensor([1, 2])
xs = x.as_subclass(SubTensor2)
r = torch.max(x, dim=0)
rs = torch.max(xs, dim=0)
self.assertEqual(type(r), type(rs))
self.assertEqual(r, rs)
class TestGradNewOnesOverride(TestCase):
""" Regression test for gh-47069 """
def test_newones(self):
t = torch.tensor([1, 2]).as_subclass(SubTensor2)
n = t.new_ones((1, 2))
self.assertEqual(type(n), SubTensor2)
class TestPickle(TestCase):
"Regression test for gh-47051"
def test_pickle(self):
t = torch.tensor([1]).as_subclass(SubTensor2)
t.abcd = "e"
t2 = pickle.loads(pickle.dumps(t))
self.assertIs(type(t2), SubTensor2)
self.assertEqual(t2.abcd, "e")
class TestBroadcastAllOverride(TestCase):
""" test for gh-37141 """
def test_broadcast_all(self):
from torch.distributions.utils import broadcast_all
a = torch.tensor([1.2, 3.4, 5.6])
a_w = Wrapper(a)
b = torch.tensor(5.0)
b_w = Wrapper(b)
c = torch.tensor([5.0, 5.0, 5.0])
o_1 = broadcast_all(a_w, b_w)
self.assertTrue(isinstance(o_1[0], Wrapper))
self.assertTrue(isinstance(o_1[1], Wrapper))
self.assertEqual(o_1[0]._data, a)
self.assertEqual(o_1[1]._data, c)
o_2 = broadcast_all(a_w, b)
self.assertTrue(isinstance(o_2[0], Wrapper))
self.assertTrue(isinstance(o_2[1], Wrapper))
self.assertEqual(o_2[0]._data, a)
self.assertEqual(o_2[1]._data, c)
class TestWrapTorchFunction(TestCase):
def test_wrap_torch_function(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs):
return -1
def dispatcher(a):
return (a,)
@torch.overrides.wrap_torch_function(dispatcher)
def f(a):
return a
self.assertEqual(f(A()), -1)
class TestIndexing(TestCase):
""" Regression tests for gh-46277 """
def test_getitem(self):
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_getitem_subclass(self):
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
return -1
t = torch.tensor([5])
self.assertEqual(t[A()], -1)
self.assertEqual(t[5, A()], -1)
self.assertEqual(t, torch.tensor([5]))
def test_setitem(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_val(self):
triggered = set()
class A:
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[0] = A()
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
def test_setitem_subclass(self):
triggered = set()
class A(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args, kwargs=None):
triggered.add(func)
return -1
t = torch.tensor([5])
t[A()] = 1
t[5, A()] = 1
self.assertIn(Tensor.__setitem__, triggered)
self.assertEqual(t, torch.tensor([5]))
class TestIterator(TestCase):
# Regression test for gh-54457
def test_iterator(self):
t = torch.tensor([5, 6, 7]).as_subclass(SubTensor2)
it = iter(t)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
class TestRNN(TestCase):
# Regression test for gh-55868
def test_rnn(self):
model = torch.nn.RNN(10, 20, 2)
input = Wrapper(torch.randn(1, 5, 10))
model(input)
class TestDisabledTorchFunction(TestCase):
# Regression test for gh-64687
def test_parameter_does_not_prevent_dispatch(self):
class MyTensor():
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return "called"
t1 = MyTensor()
t2 = torch.nn.Parameter(torch.rand(2, 2))
self.assertEqual(torch.add(t2, t1), "called")
inp = torch.rand(10, 10)
self.assertEqual(torch.nn.functional.linear(inp, t1, t2), "called")
self.assertEqual(torch.nn.functional.linear(inp, t2, t1), "called")
class TestTorchFunctionWarning(TestCase):
def test_warn_on_invalid_torch_function(self):
class Bad1():
def __torch_function__(self, *args, **kwargs):
pass
class Bad2(torch.Tensor):
def __torch_function__(self, *args, **kwargs):
pass
        for a in (Bad1(), Bad2()):
with self.assertWarnsRegex(DeprecationWarning, "as a plain method is deprecated"):
# Function that handles torch_function on the python side
torch.nn.functional.dropout(a)
with self.assertWarnsRegex(UserWarning, "as a plain method is deprecated"):
# Function that handles torch_function in C++
torch.abs(a)
if __name__ == '__main__':
run_tests()
|
from bs4 import PageElement, Tag
from .options import Options
from .utils.soup_util import clone_element
def make_indexes(soup: PageElement, options: Options) -> None:
""" Generate ordered chapter number and TOC of document.
Arguments:
soup {BeautifulSoup} -- DOM object of Document.
options {Options} -- The options of this sequence.
"""
    # Step 1: (re)order headings
_inject_heading_order(soup, options)
# Step 2: generate toc page
level = options.toc_level
if level < 1 or level > 3:
return
options.logger.info(
f'Generate a table of contents up to heading level {level}.')
h1li = None
h2ul = h2li = h3ul = None
exclude_lv2 = exclude_lv3 = False
def makeLink(h: Tag) -> Tag:
li = soup.new_tag('li')
ref = h.get('id', '')
a = soup.new_tag('a', href=f'#{ref}')
for el in h.contents:
if el.name == 'a':
a.append(el.contents[0])
else:
a.append(clone_element(el))
li.append(a)
options.logger.debug(f"| [{h.get_text(separator=" ")}]({ref})")
return li
toc = soup.new_tag('article', id='doc-toc')
title = soup.new_tag('h1')
title.append(options.toc_title)
toc.append(title)
h1ul = soup.new_tag('ul')
toc.append(h1ul)
headings = soup.find_all(['h1', 'h2', 'h3'])
for h in headings:
if h.name == 'h1':
h1li = makeLink(h)
h1ul.append(h1li)
h2ul = h2li = h3ul = None
exclude_lv2 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and h.name == 'h2' and level >= 2:
if not h2ul:
h2ul = soup.new_tag('ul')
h1li.append(h2ul)
h2li = makeLink(h)
h2ul.append(h2li)
h3ul = None
exclude_lv3 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and not exclude_lv3 \
and h.name == 'h3' and level >= 3:
if not h2li:
continue
if not h3ul:
h3ul = soup.new_tag('ul')
h2li.append(h3ul)
h3li = makeLink(h)
h3ul.append(h3li)
        else:
            continue
soup.body.insert(0, toc)
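# Usage sketch (hypothetical setup; `html` and `options` are assumed to come
# from the plugin's own machinery):
#
#   soup = BeautifulSoup(html, 'html.parser')
#   make_indexes(soup, options)  # numbers h1-h3, prepends <article id='doc-toc'>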
def _inject_heading_order(soup: Tag, options: Options):
level = options.ordered_chapter_level
if level < 1 or level > 3:
return
options.logger.info(f'Number headings up to level {level}.')
h1n = h2n = h3n = 0
exclude_lv2 = exclude_lv3 = False
headings = soup.find_all(['h1', 'h2', 'h3'])
for h in headings:
if h.name == 'h1':
h1n += 1
h2n = h3n = 0
prefix = f'{h1n}. '
exclude_lv2 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and h.name == 'h2' and level >= 2:
h2n += 1
h3n = 0
prefix = f'{h1n}.{h2n} '
exclude_lv3 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and not exclude_lv3 \
and h.name == 'h3' and level >= 3:
h3n += 1
prefix = f'{h1n}.{h2n}.{h3n} '
else:
continue
options.logger.debug(f"| [{prefix} {h.text}]({h.get("id", "(none)")})")
nm_tag = soup.new_tag('span', **{'class': 'pdf-order'})
nm_tag.append(prefix)
h.insert(0, nm_tag)
def _is_exclude(url: str, options: Options) -> bool:
if not url:
return False
if url in options.excludes_children:
options.logger.info(f"| (exclude '{url}')")
return True
return False
|
from bs4 import PageElement, Tag
from .options import Options
from .utils.soup_util import clone_element
def make_indexes(soup: PageElement, options: Options) -> None:
""" Generate ordered chapter number and TOC of document.
Arguments:
soup {BeautifulSoup} -- DOM object of Document.
options {Options} -- The options of this sequence.
"""
    # Step 1: (re)order headings
_inject_heading_order(soup, options)
# Step 2: generate toc page
level = options.toc_level
if level < 1 or level > 3:
return
options.logger.info(
f'Generate a table of contents up to heading level {level}.')
h1li = None
h2ul = h2li = h3ul = None
exclude_lv2 = exclude_lv3 = False
def makeLink(h: Tag) -> Tag:
li = soup.new_tag('li')
ref = h.get('id', '')
a = soup.new_tag('a', href=f'#{ref}')
for el in h.contents:
if el.name == 'a':
a.append(el.contents[0])
else:
a.append(clone_element(el))
li.append(a)
options.logger.debug(f"| [{h.get_text(separator=' ')}]({ref})")
return li
toc = soup.new_tag('article', id='doc-toc')
title = soup.new_tag('h1')
title.append(options.toc_title)
toc.append(title)
h1ul = soup.new_tag('ul')
toc.append(h1ul)
headings = soup.find_all(['h1', 'h2', 'h3'])
for h in headings:
if h.name == 'h1':
h1li = makeLink(h)
h1ul.append(h1li)
h2ul = h2li = h3ul = None
exclude_lv2 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and h.name == 'h2' and level >= 2:
if not h2ul:
h2ul = soup.new_tag('ul')
h1li.append(h2ul)
h2li = makeLink(h)
h2ul.append(h2li)
h3ul = None
exclude_lv3 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and not exclude_lv3 \
and h.name == 'h3' and level >= 3:
if not h2li:
continue
if not h3ul:
h3ul = soup.new_tag('ul')
h2li.append(h3ul)
h3li = makeLink(h)
h3ul.append(h3li)
        else:
            continue
soup.body.insert(0, toc)
def _inject_heading_order(soup: Tag, options: Options):
level = options.ordered_chapter_level
if level < 1 or level > 3:
return
options.logger.info(f'Number headings up to level {level}.')
h1n = h2n = h3n = 0
exclude_lv2 = exclude_lv3 = False
headings = soup.find_all(['h1', 'h2', 'h3'])
for h in headings:
if h.name == 'h1':
h1n += 1
h2n = h3n = 0
prefix = f'{h1n}. '
exclude_lv2 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and h.name == 'h2' and level >= 2:
h2n += 1
h3n = 0
prefix = f'{h1n}.{h2n} '
exclude_lv3 = _is_exclude(h.get('id', None), options)
elif not exclude_lv2 and not exclude_lv3 \
and h.name == 'h3' and level >= 3:
h3n += 1
prefix = f'{h1n}.{h2n}.{h3n} '
else:
continue
options.logger.debug(f"| [{prefix} {h.text}]({h.get('id', '(none)')})")
nm_tag = soup.new_tag('span', **{'class': 'pdf-order'})
nm_tag.append(prefix)
h.insert(0, nm_tag)
def _is_exclude(url: str, options: Options) -> bool:
if not url:
return False
if url in options.excludes_children:
options.logger.info(f"| (exclude '{url}')")
return True
return False
|
import argparse
from datetime import datetime
import yfinance as yf
import pandas as pd
from gamestonk_terminal.dataframe_helpers import clean_df_index
from gamestonk_terminal.helper_funcs import (
long_number_format,
parse_known_args_and_warn,
)
def info(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="info",
description="""
Print information about the company. The following fields are expected:
Zip, Sector, Full time employees, Long business summary, City, Phone, State, Country,
Website, Max age, Address, Industry, Previous close, Regular market open, Two hundred
day average, Payout ratio, Regular market day high, Average daily volume 10 day,
Regular market previous close, Fifty day average, Open, Average volume 10 days, Beta,
Regular market day low, Price hint, Currency, Trailing PE, Regular market volume,
Market cap, Average volume, Price to sales trailing 12 months, Day low, Ask, Ask size,
Volume, Fifty two week high, Forward PE, Fifty two week low, Bid, Tradeable, Bid size,
Day high, Exchange, Short name, Long name, Exchange timezone name, Exchange timezone
short name, Is esg populated, Gmt off set milliseconds, Quote type, Symbol, Message
board id, Market, Enterprise to revenue, Profit margins, Enterprise to ebitda, 52 week
change, Forward EPS, Shares outstanding, Book value, Shares short, Shares percent
shares out, Last fiscal year end, Held percent institutions, Net income to common,
Trailing EPS, Sand p52 week change, Price to book, Held percent insiders, Next fiscal
year end, Most recent quarter, Short ratio, Shares short previous month date, Float
shares, Enterprise value, Last split date, Last split factor, Earnings quarterly growth,
Date short interest, PEG ratio, Short percent of float, Shares short prior month,
Regular market price, Logo_url. [Source: Yahoo Finance]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
df_info = pd.DataFrame(stock.info.items(), columns=["Metric", "Value"])
df_info = df_info.set_index("Metric")
clean_df_index(df_info)
if (
"Last split date" in df_info.index
and df_info.loc["Last split date"].values[0]
):
df_info.loc["Last split date"].values[0] = datetime.fromtimestamp(
df_info.loc["Last split date"].values[0]
).strftime("%d/%m/%Y")
df_info = df_info.mask(df_info["Value"].astype(str).eq("[]")).dropna()
df_info = df_info.applymap(lambda x: long_number_format(x))
df_info = df_info.rename(
index={
"Address1": "Address",
"Average daily volume10 day": "Average daily volume 10 day",
"Average volume10days": "Average volume 10 days",
"Price to sales trailing12 months": "Price to sales trailing 12 months",
}
)
df_info.index = df_info.index.str.replace("eps", "EPS")
df_info.index = df_info.index.str.replace("p e", "PE")
df_info.index = df_info.index.str.replace("Peg", "PEG")
pd.set_option("display.max_colwidth", None)
if "Long business summary" in df_info.index:
print(df_info.drop(index=["Long business summary"]).to_string(header=False))
print("")
print(df_info.loc["Long business summary"].values[0])
print("")
else:
print(df_info.to_string(header=False))
print("")
except Exception as e:
print(e)
print("")
return
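# Usage sketch (hypothetical): the terminal dispatcher is assumed to call this
# with the remaining CLI tokens and the loaded ticker, e.g.
#
#   info([], "AAPL")   # prints the Yahoo Finance profile table for AAPL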
def shareholders(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="shrs",
description="""Print Major, institutional and mutualfunds shareholders.
[Source: Yahoo Finance]""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
pd.set_option("display.max_colwidth", None)
# Major holders
print("Major holders")
df_major_holders = stock.major_holders
df_major_holders[1] = df_major_holders[1].apply(
lambda x: x.replace("%", "Percentage")
)
print(df_major_holders.to_string(index=False, header=False))
print("")
# Institutional holders
print("Institutional holders")
df_institutional_shareholders = stock.institutional_holders
df_institutional_shareholders.columns = (
df_institutional_shareholders.columns.str.replace("% Out", "Stake")
)
df_institutional_shareholders["Shares"] = df_institutional_shareholders[
"Shares"
].apply(lambda x: long_number_format(x))
df_institutional_shareholders["Value"] = df_institutional_shareholders[
"Value"
].apply(lambda x: long_number_format(x))
df_institutional_shareholders["Stake"] = df_institutional_shareholders[
"Stake"
].apply(lambda x: str(f"{100 * x:.2f}") + " %")
print(df_institutional_shareholders.to_string(index=False))
print("")
        # Mutual fund holders
        print("Mutual fund holders")
df_mutualfund_shareholders = stock.mutualfund_holders
df_mutualfund_shareholders.columns = (
df_mutualfund_shareholders.columns.str.replace("% Out", "Stake")
)
df_mutualfund_shareholders["Shares"] = df_mutualfund_shareholders[
"Shares"
].apply(lambda x: long_number_format(x))
df_mutualfund_shareholders["Value"] = df_mutualfund_shareholders["Value"].apply(
lambda x: long_number_format(x)
)
df_mutualfund_shareholders["Stake"] = df_mutualfund_shareholders["Stake"].apply(
lambda x: str(f"{100 * x:.2f}") + " %"
)
print(df_mutualfund_shareholders.to_string(index=False))
print("")
except Exception as e:
print(e)
print("")
return
def sustainability(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="sust",
description="""
Print sustainability values of the company. The following fields are expected:
Palmoil, Controversialweapons, Gambling, Socialscore, Nuclear, Furleather, Alcoholic,
Gmo, Catholic, Socialpercentile, Peercount, Governancescore, Environmentpercentile,
Animaltesting, Tobacco, Totalesg, Highestcontroversy, Esgperformance, Coal, Pesticides,
Adult, Percentile, Peergroup, Smallarms, Environmentscore, Governancepercentile,
Militarycontract. [Source: Yahoo Finance]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
pd.set_option("display.max_colwidth", None)
df_sustainability = stock.sustainability
        if df_sustainability is None or df_sustainability.empty:
print(f"No sustainability information in Yahoo for {s_ticker}")
print("")
return
clean_df_index(df_sustainability)
df_sustainability = df_sustainability.rename(
index={
"Controversialweapons": "Controversial Weapons",
"Socialpercentile": "Social Percentile",
"Peercount": "Peer Count",
"Governancescore": "Governance Score",
"Environmentpercentile": "Environment Percentile",
"Animaltesting": "Animal Testing",
"Highestcontroversy": "Highest Controversy",
"Environmentscore": "Environment Score",
"Governancepercentile": "Governance Percentile",
"Militarycontract": "Military Contract",
}
)
print(df_sustainability.to_string(header=False))
print("")
except Exception as e:
print(e)
print("")
return
def calendar_earnings(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="cal",
description="""
Calendar earnings of the company. Including revenue and earnings estimates.
[Source: Yahoo Finance]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
df_calendar = stock.calendar
if df_calendar.empty:
print(f"No earnings calendar information in Yahoo for {s_ticker}")
print("")
return
df_calendar.iloc[0, 0] = df_calendar.iloc[0, 0].date().strftime("%d/%m/%Y")
df_calendar.iloc[:, 0] = df_calendar.iloc[:, 0].apply(
lambda x: long_number_format(x)
)
print(f"Earnings Date: {df_calendar.iloc[:, 0]["Earnings Date"]}")
avg = df_calendar.iloc[:, 0]["Earnings Average"]
low = df_calendar.iloc[:, 0]["Earnings Low"]
high = df_calendar.iloc[:, 0]["Earnings High"]
print(f"Earnings Estimate Avg: {avg} [{low}, {high}]")
print(
f"Revenue Estimate Avg: {df_calendar.iloc[:, 0]['Revenue Average']} \
[{df_calendar.iloc[:, 0]['Revenue Low']}, {df_calendar.iloc[:, 0]['Revenue High']}]"
)
print("")
except Exception as e:
print(e)
print("")
return
|
import argparse
from datetime import datetime
import yfinance as yf
import pandas as pd
from gamestonk_terminal.dataframe_helpers import clean_df_index
from gamestonk_terminal.helper_funcs import (
long_number_format,
parse_known_args_and_warn,
)
def info(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="info",
description="""
Print information about the company. The following fields are expected:
Zip, Sector, Full time employees, Long business summary, City, Phone, State, Country,
Website, Max age, Address, Industry, Previous close, Regular market open, Two hundred
day average, Payout ratio, Regular market day high, Average daily volume 10 day,
Regular market previous close, Fifty day average, Open, Average volume 10 days, Beta,
Regular market day low, Price hint, Currency, Trailing PE, Regular market volume,
Market cap, Average volume, Price to sales trailing 12 months, Day low, Ask, Ask size,
Volume, Fifty two week high, Forward PE, Fifty two week low, Bid, Tradeable, Bid size,
Day high, Exchange, Short name, Long name, Exchange timezone name, Exchange timezone
short name, Is esg populated, Gmt off set milliseconds, Quote type, Symbol, Message
board id, Market, Enterprise to revenue, Profit margins, Enterprise to ebitda, 52 week
change, Forward EPS, Shares outstanding, Book value, Shares short, Shares percent
shares out, Last fiscal year end, Held percent institutions, Net income to common,
Trailing EPS, Sand p52 week change, Price to book, Held percent insiders, Next fiscal
year end, Most recent quarter, Short ratio, Shares short previous month date, Float
shares, Enterprise value, Last split date, Last split factor, Earnings quarterly growth,
Date short interest, PEG ratio, Short percent of float, Shares short prior month,
Regular market price, Logo_url. [Source: Yahoo Finance]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
df_info = pd.DataFrame(stock.info.items(), columns=["Metric", "Value"])
df_info = df_info.set_index("Metric")
clean_df_index(df_info)
if (
"Last split date" in df_info.index
and df_info.loc["Last split date"].values[0]
):
df_info.loc["Last split date"].values[0] = datetime.fromtimestamp(
df_info.loc["Last split date"].values[0]
).strftime("%d/%m/%Y")
df_info = df_info.mask(df_info["Value"].astype(str).eq("[]")).dropna()
df_info = df_info.applymap(lambda x: long_number_format(x))
df_info = df_info.rename(
index={
"Address1": "Address",
"Average daily volume10 day": "Average daily volume 10 day",
"Average volume10days": "Average volume 10 days",
"Price to sales trailing12 months": "Price to sales trailing 12 months",
}
)
df_info.index = df_info.index.str.replace("eps", "EPS")
df_info.index = df_info.index.str.replace("p e", "PE")
df_info.index = df_info.index.str.replace("Peg", "PEG")
pd.set_option("display.max_colwidth", None)
if "Long business summary" in df_info.index:
print(df_info.drop(index=["Long business summary"]).to_string(header=False))
print("")
print(df_info.loc["Long business summary"].values[0])
print("")
else:
print(df_info.to_string(header=False))
print("")
except Exception as e:
print(e)
print("")
return
def shareholders(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="shrs",
description="""Print Major, institutional and mutualfunds shareholders.
[Source: Yahoo Finance]""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
pd.set_option("display.max_colwidth", None)
# Major holders
print("Major holders")
df_major_holders = stock.major_holders
df_major_holders[1] = df_major_holders[1].apply(
lambda x: x.replace("%", "Percentage")
)
print(df_major_holders.to_string(index=False, header=False))
print("")
# Institutional holders
print("Institutional holders")
df_institutional_shareholders = stock.institutional_holders
df_institutional_shareholders.columns = (
df_institutional_shareholders.columns.str.replace("% Out", "Stake")
)
df_institutional_shareholders["Shares"] = df_institutional_shareholders[
"Shares"
].apply(lambda x: long_number_format(x))
df_institutional_shareholders["Value"] = df_institutional_shareholders[
"Value"
].apply(lambda x: long_number_format(x))
df_institutional_shareholders["Stake"] = df_institutional_shareholders[
"Stake"
].apply(lambda x: str(f"{100 * x:.2f}") + " %")
print(df_institutional_shareholders.to_string(index=False))
print("")
        # Mutual fund holders
        print("Mutual fund holders")
df_mutualfund_shareholders = stock.mutualfund_holders
df_mutualfund_shareholders.columns = (
df_mutualfund_shareholders.columns.str.replace("% Out", "Stake")
)
df_mutualfund_shareholders["Shares"] = df_mutualfund_shareholders[
"Shares"
].apply(lambda x: long_number_format(x))
df_mutualfund_shareholders["Value"] = df_mutualfund_shareholders["Value"].apply(
lambda x: long_number_format(x)
)
df_mutualfund_shareholders["Stake"] = df_mutualfund_shareholders["Stake"].apply(
lambda x: str(f"{100 * x:.2f}") + " %"
)
print(df_mutualfund_shareholders.to_string(index=False))
print("")
except Exception as e:
print(e)
print("")
return
def sustainability(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="sust",
description="""
Print sustainability values of the company. The following fields are expected:
Palmoil, Controversialweapons, Gambling, Socialscore, Nuclear, Furleather, Alcoholic,
Gmo, Catholic, Socialpercentile, Peercount, Governancescore, Environmentpercentile,
Animaltesting, Tobacco, Totalesg, Highestcontroversy, Esgperformance, Coal, Pesticides,
Adult, Percentile, Peergroup, Smallarms, Environmentscore, Governancepercentile,
Militarycontract. [Source: Yahoo Finance]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
pd.set_option("display.max_colwidth", None)
df_sustainability = stock.sustainability
        if df_sustainability is None or df_sustainability.empty:
print(f"No sustainability information in Yahoo for {s_ticker}")
print("")
return
clean_df_index(df_sustainability)
df_sustainability = df_sustainability.rename(
index={
"Controversialweapons": "Controversial Weapons",
"Socialpercentile": "Social Percentile",
"Peercount": "Peer Count",
"Governancescore": "Governance Score",
"Environmentpercentile": "Environment Percentile",
"Animaltesting": "Animal Testing",
"Highestcontroversy": "Highest Controversy",
"Environmentscore": "Environment Score",
"Governancepercentile": "Governance Percentile",
"Militarycontract": "Military Contract",
}
)
print(df_sustainability.to_string(header=False))
print("")
except Exception as e:
print(e)
print("")
return
def calendar_earnings(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="cal",
description="""
Calendar earnings of the company. Including revenue and earnings estimates.
[Source: Yahoo Finance]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
stock = yf.Ticker(s_ticker)
df_calendar = stock.calendar
if df_calendar.empty:
print(f"No earnings calendar information in Yahoo for {s_ticker}")
print("")
return
df_calendar.iloc[0, 0] = df_calendar.iloc[0, 0].date().strftime("%d/%m/%Y")
df_calendar.iloc[:, 0] = df_calendar.iloc[:, 0].apply(
lambda x: long_number_format(x)
)
print(f"Earnings Date: {df_calendar.iloc[:, 0]['Earnings Date']}")
avg = df_calendar.iloc[:, 0]["Earnings Average"]
low = df_calendar.iloc[:, 0]["Earnings Low"]
high = df_calendar.iloc[:, 0]["Earnings High"]
print(f"Earnings Estimate Avg: {avg} [{low}, {high}]")
print(
f"Revenue Estimate Avg: {df_calendar.iloc[:, 0]['Revenue Average']} \
[{df_calendar.iloc[:, 0]['Revenue Low']}, {df_calendar.iloc[:, 0]['Revenue High']}]"
)
print("")
except Exception as e:
print(e)
print("")
return
|
import logging
import os
import sys
import configargparse
import yaml
from GramAddict.core.plugin_loader import PluginLoader
logger = logging.getLogger(__name__)
class Config:
def __init__(self, first_run=False, **kwargs):
if kwargs:
self.args = kwargs
self.module = True
else:
self.args = sys.argv
self.module = False
self.config = None
self.config_list = None
self.plugins = None
self.actions = None
self.enabled = []
self.unknown_args = []
self.debug = False
self.device_id = None
self.app_id = None
self.first_run = first_run
self.username = False
# Pre-Load Variables Needed for Script Init
if self.module:
if "debug" in self.args:
self.debug = True
if "username" in self.args:
self.username = self.args["username"]
if "app_id" in self.args:
app_id = self.args["app_id"]
if app_id:
self.app_id = app_id
else:
self.app_id = "com.instagram.android"
elif "--config" in self.args:
try:
file_name = self.args[self.args.index("--config") + 1]
if not file_name.endswith((".yml", ".yaml")):
logger.error(
f"You have to specify a *.yml / *.yaml config file path (For example 'accounts/your_account_name/config.yml')! \nYou entered: {file_name}, abort."
)
sys.exit(1)
with open(file_name, encoding="utf-8") as fin:
# preserve order of yaml
self.config_list = [line.strip() for line in fin]
fin.seek(0)
# preload config for debug and username
self.config = yaml.safe_load(fin)
except IndexError:
logger.warning(
"Please provide a filename with your --config argument. Example: '--config accounts/yourusername/config.yml'"
)
exit(2)
except FileNotFoundError:
logger.error(
f"I can't see the file '{file_name}'! Double check the spelling or if you're calling the bot from the right folder. (You're there: '{os.getcwd()}')"
)
exit(2)
self.username = self.config.get("username", False)
self.debug = self.config.get("debug", False)
self.app_id = self.config.get("app_id", "com.instagram.android")
else:
if "--debug" in self.args:
self.debug = True
if "--username" in self.args:
try:
self.username = self.args[self.args.index("--username") + 1]
except IndexError:
logger.warning(
"Please provide a username with your --username argument. Example: '--username yourusername'"
)
exit(2)
if "--app-id" in self.args:
self.app_id = self.args[self.args.index("--app-id") + 1]
else:
self.app_id = "com.instagram.android"
# Configure ArgParse
self.parser = configargparse.ArgumentParser(
config_file_open_func=lambda filename: open(
filename, "r+", encoding="utf-8"
),
description="GramAddict Instagram Bot",
)
self.parser.add_argument(
"--config",
required=False,
is_config_file=True,
help="config file path",
)
# on first run, we must wait to proceed with loading
if not self.first_run:
self.load_plugins()
self.parse_args()
def load_plugins(self):
self.plugins = PluginLoader("GramAddict.plugins", self.first_run).plugins
self.actions = {}
for plugin in self.plugins:
if plugin.arguments:
for arg in plugin.arguments:
try:
action = arg.get("action", None)
if action:
self.parser.add_argument(
arg["arg"],
help=arg["help"],
action=arg.get("action", None),
)
else:
self.parser.add_argument(
arg["arg"],
nargs=arg["nargs"],
help=arg["help"],
metavar=arg["metavar"],
default=arg["default"],
)
if arg.get("operation", False):
self.actions[arg["arg"][2:]] = plugin
except Exception as e:
logger.error(
f"Error while importing arguments of plugin {plugin.__class__.__name__}. Error: Missing key from arguments dictionary - {e}"
)
def parse_args(self):
def _is_legacy_arg(arg):
if arg in ["interact", "hashtag-likers"]:
if self.first_run:
logger.warning(
f"You are using a legacy argument {arg} that is no longer supported. It will not be used. Please refer to https://docs.gramaddict.org/#/configuration?id=arguments."
)
return True
return False
if self.module:
if self.first_run:
logger.debug("Arguments used:")
if self.config:
logger.debug(f"Config used: {self.config}")
if not len(self.args) > 0:
self.parser.print_help()
exit(0)
else:
if self.first_run:
logger.debug(f"Arguments used: {" ".join(sys.argv[1:])}")
if self.config:
logger.debug(f"Config used: {self.config}")
if not len(sys.argv) > 1:
self.parser.print_help()
exit(0)
if self.module:
arg_str = ""
for k, v in self.args.items():
new_key = k.replace("_", "-")
new_key = " --" + new_key
arg_str += new_key + " " + v
self.args, self.unknown_args = self.parser.parse_known_args(args=arg_str)
else:
self.args, self.unknown_args = self.parser.parse_known_args()
if "run" in self.unknown_args:
self.unknown_args.remove("run")
if self.unknown_args and self.first_run:
logger.error(
"Unknown arguments: " + ", ".join(str(arg) for arg in self.unknown_args)
)
self.parser.print_help()
for arg in self.unknown_args:
if "detect-block" in arg:
logger.error(
"Please replace the line 'detect-block: true/false' in your config file *.yml with 'disable-block-detection: true/false'"
)
break
exit(0)
self.device_id = self.args.device
# We need to maintain the order of plugins as defined
# in config or sys.argv
if self.config_list:
for item in self.config_list:
item = item.split(":")[0]
if (
item in self.actions
and getattr(self.args, item.replace("-", "_")) is not None
and not _is_legacy_arg(item)
):
self.enabled.append(item)
else:
for item in sys.argv:
nitem = item[2:]
if (
nitem in self.actions
and getattr(self.args, nitem.replace("-", "_")) is not None
and not _is_legacy_arg(nitem)
):
self.enabled.append(nitem)
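# Hedged usage sketch (hypothetical account name; kwargs are joined into a
# CLI-style string in parse_args, so string values are assumed):
#
#   config = Config(username="myaccount")
#   config.enabled   # actions enabled via the supplied arguments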
|
import logging
import os
import sys
import configargparse
import yaml
from GramAddict.core.plugin_loader import PluginLoader
logger = logging.getLogger(__name__)
class Config:
def __init__(self, first_run=False, **kwargs):
if kwargs:
self.args = kwargs
self.module = True
else:
self.args = sys.argv
self.module = False
self.config = None
self.config_list = None
self.plugins = None
self.actions = None
self.enabled = []
self.unknown_args = []
self.debug = False
self.device_id = None
self.app_id = None
self.first_run = first_run
self.username = False
# Pre-Load Variables Needed for Script Init
if self.module:
if "debug" in self.args:
self.debug = True
if "username" in self.args:
self.username = self.args["username"]
if "app_id" in self.args:
app_id = self.args["app_id"]
if app_id:
self.app_id = app_id
else:
self.app_id = "com.instagram.android"
elif "--config" in self.args:
try:
file_name = self.args[self.args.index("--config") + 1]
if not file_name.endswith((".yml", ".yaml")):
logger.error(
f"You have to specify a *.yml / *.yaml config file path (For example 'accounts/your_account_name/config.yml')! \nYou entered: {file_name}, abort."
)
sys.exit(1)
with open(file_name, encoding="utf-8") as fin:
# preserve order of yaml
self.config_list = [line.strip() for line in fin]
fin.seek(0)
# preload config for debug and username
self.config = yaml.safe_load(fin)
except IndexError:
logger.warning(
"Please provide a filename with your --config argument. Example: '--config accounts/yourusername/config.yml'"
)
exit(2)
except FileNotFoundError:
logger.error(
f"I can't see the file '{file_name}'! Double check the spelling or if you're calling the bot from the right folder. (You're there: '{os.getcwd()}')"
)
exit(2)
self.username = self.config.get("username", False)
self.debug = self.config.get("debug", False)
self.app_id = self.config.get("app_id", "com.instagram.android")
else:
if "--debug" in self.args:
self.debug = True
if "--username" in self.args:
try:
self.username = self.args[self.args.index("--username") + 1]
except IndexError:
logger.warning(
"Please provide a username with your --username argument. Example: '--username yourusername'"
)
exit(2)
if "--app-id" in self.args:
self.app_id = self.args[self.args.index("--app-id") + 1]
else:
self.app_id = "com.instagram.android"
# Configure ArgParse
self.parser = configargparse.ArgumentParser(
config_file_open_func=lambda filename: open(
filename, "r+", encoding="utf-8"
),
description="GramAddict Instagram Bot",
)
self.parser.add_argument(
"--config",
required=False,
is_config_file=True,
help="config file path",
)
# on first run, we must wait to proceed with loading
if not self.first_run:
self.load_plugins()
self.parse_args()
def load_plugins(self):
self.plugins = PluginLoader("GramAddict.plugins", self.first_run).plugins
self.actions = {}
for plugin in self.plugins:
if plugin.arguments:
for arg in plugin.arguments:
try:
action = arg.get("action", None)
if action:
self.parser.add_argument(
arg["arg"],
help=arg["help"],
action=arg.get("action", None),
)
else:
self.parser.add_argument(
arg["arg"],
nargs=arg["nargs"],
help=arg["help"],
metavar=arg["metavar"],
default=arg["default"],
)
if arg.get("operation", False):
self.actions[arg["arg"][2:]] = plugin
except Exception as e:
logger.error(
f"Error while importing arguments of plugin {plugin.__class__.__name__}. Error: Missing key from arguments dictionary - {e}"
)
def parse_args(self):
def _is_legacy_arg(arg):
if arg in ["interact", "hashtag-likers"]:
if self.first_run:
logger.warning(
f"You are using a legacy argument {arg} that is no longer supported. It will not be used. Please refer to https://docs.gramaddict.org/#/configuration?id=arguments."
)
return True
return False
if self.module:
if self.first_run:
logger.debug("Arguments used:")
if self.config:
logger.debug(f"Config used: {self.config}")
if not len(self.args) > 0:
self.parser.print_help()
exit(0)
else:
if self.first_run:
logger.debug(f"Arguments used: {' '.join(sys.argv[1:])}")
if self.config:
logger.debug(f"Config used: {self.config}")
if not len(sys.argv) > 1:
self.parser.print_help()
exit(0)
if self.module:
arg_str = ""
for k, v in self.args.items():
new_key = k.replace("_", "-")
new_key = " --" + new_key
arg_str += new_key + " " + v
self.args, self.unknown_args = self.parser.parse_known_args(args=arg_str)
else:
self.args, self.unknown_args = self.parser.parse_known_args()
if "run" in self.unknown_args:
self.unknown_args.remove("run")
if self.unknown_args and self.first_run:
logger.error(
"Unknown arguments: " + ", ".join(str(arg) for arg in self.unknown_args)
)
self.parser.print_help()
for arg in self.unknown_args:
if "detect-block" in arg:
logger.error(
"Please replace the line 'detect-block: true/false' in your config file *.yml with 'disable-block-detection: true/false'"
)
break
exit(0)
self.device_id = self.args.device
# We need to maintain the order of plugins as defined
# in config or sys.argv
if self.config_list:
for item in self.config_list:
item = item.split(":")[0]
if (
item in self.actions
and getattr(self.args, item.replace("-", "_")) is not None
and not _is_legacy_arg(item)
):
self.enabled.append(item)
else:
for item in sys.argv:
nitem = item[2:]
if (
nitem in self.actions
and getattr(self.args, nitem.replace("-", "_")) is not None
and not _is_legacy_arg(nitem)
):
self.enabled.append(nitem)
|
import re
class Rule:
def __init__(self, line):
line = line.strip().split(" contain ")
line[1] = line[1].strip(".").split(", ")
self.contents = {}
for item in line[1]:
# Grab that number out in front
regex = re.compile(r"[0-9]+")
            # If we didn't find one, there are no bags inside
if match := regex.match(item):
                quantity = int(match.group())
# The +1 deals with the space
bag_type = item[match.span()[1] + 1:]
if quantity > 1:
# This gets rid of the s if it's plural
bag_type = bag_type[:-1]
self.contents[bag_type] = quantity
# The s makes things irritating so I want it gone
self.bag_type = line[0][:-1]
def contains_directly(self, bag_type: str):
return bag_type in self.contents
# Warning: recursive
def contains(self, bag_type: str, rule_dict: dict):
if self.contains_directly(bag_type):
return True
else:
for bag in self.contents:
if bag in rule_dict:
if rule_dict[bag].contains(bag_type, rule_dict):
return True
else:
print("An unexpected bag was discovered!")
return False
def count_internal_bags(self, rule_dict: dict):
internal_bags = 0
for bag in self.contents:
# count these bags...
internal_bags += self.contents[bag] # recall that this value represents the quantity
# ...and count the bags inside of it
internal_bags += rule_dict[bag].count_internal_bags(rule_dict) * self.contents[bag]
return internal_bags
def parse_input(filename: str):
with open(filename, "r") as file:
rules = {}
for line in file:
rule = Rule(line)
print(f"{rule.bag_type} contains {rule.contents}")
rules[rule.bag_type] = rule
return rules
def main():
rule_dict = parse_input("input.txt")
shiny_gold = 0
for rule_entry in rule_dict.keys():
rule = rule_dict[rule_entry]
if rule.contains("shiny gold bag", rule_dict):
print(f"Found {rule.contents} in {rule.bag_type}")
shiny_gold += 1
print("\n")
print(f"Found {shiny_gold} bags containing at least one shiny gold bag.")
print(f"A shiny gold bag contains {rule_dict["shiny gold bag"].count_internal_bags(rule_dict)} bags.")
if __name__ == "__main__":
main()
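# A minimal usage sketch of the Rule API above (hypothetical rule text, not
# taken from any real puzzle input):
#
#   rule = Rule("light red bags contain 1 bright white bag, 2 muted yellow bags.")
#   rule.bag_type                               # "light red bag"
#   rule.contents                               # {"bright white bag": 1, "muted yellow bag": 2}
#   rule.contains_directly("bright white bag")  # True
#
# contains() and count_internal_bags() additionally need the full rule_dict,
# since they recurse through the rules of the inner bag types.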
| import re
class Rule:
def __init__(self, line):
line = line.strip().split(" contain ")
line[1] = line[1].strip(".").split(", ")
self.contents = {}
for item in line[1]:
# Grab that number out in front
regex = re.compile(r"[0-9]+")
# If we didn't find one that means it's no bags inside
if match := regex.match(item):
quantity = int(item[match.span()[0]:match.span()[1]])
# The +1 deals with the space
bag_type = item[match.span()[1] + 1:]
if quantity > 1:
# This gets rid of the s if it's plural
bag_type = bag_type[:-1]
self.contents[bag_type] = quantity
# The s makes things irritating so I want it gone
self.bag_type = line[0][:-1]
def contains_directly(self, bag_type: str):
return bag_type in self.contents
# Warning: recursive
def contains(self, bag_type: str, rule_dict: dict):
if self.contains_directly(bag_type):
return True
else:
for bag in self.contents:
if bag in rule_dict:
if rule_dict[bag].contains(bag_type, rule_dict):
return True
else:
print("An unexpected bag was discovered!")
return False
def count_internal_bags(self, rule_dict: dict):
internal_bags = 0
for bag in self.contents:
# count these bags...
internal_bags += self.contents[bag] # recall that this value represents the quantity
# ...and count the bags inside of it
internal_bags += rule_dict[bag].count_internal_bags(rule_dict) * self.contents[bag]
return internal_bags
def parse_input(filename: str):
with open(filename, "r") as file:
rules = {}
for line in file:
rule = Rule(line)
print(f"{rule.bag_type} contains {rule.contents}")
rules[rule.bag_type] = rule
return rules
def main():
rule_dict = parse_input("input.txt")
shiny_gold = 0
for rule_entry in rule_dict.keys():
rule = rule_dict[rule_entry]
if rule.contains("shiny gold bag", rule_dict):
print(f"Found {rule.contents} in {rule.bag_type}")
shiny_gold += 1
print("\n")
print(f"Found {shiny_gold} bags containing at least one shiny gold bag.")
print(f"A shiny gold bag contains {rule_dict['shiny gold bag'].count_internal_bags(rule_dict)} bags.")
if __name__ == "__main__":
main()
|
import glob
import os
import numpy as np
from unyt import dimensions, unyt_array
from unyt.unit_registry import UnitRegistry
from yt.data_objects.time_series import DatasetSeries, SimulationTimeSeries
from yt.funcs import only_on_root
from yt.loaders import load
from yt.utilities.cosmology import Cosmology
from yt.utilities.exceptions import (
InvalidSimulationTimeSeries,
MissingParameter,
NoStoppingCondition,
YTUnidentifiedDataType,
)
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects
class EnzoSimulation(SimulationTimeSeries):
r"""
Initialize an Enzo Simulation object.
Upon creation, the parameter file is parsed and the time and redshift
are calculated and stored in all_outputs. A time units dictionary is
instantiated to allow for time outputs to be requested with physical
    time units. The get_time_series method can be used to generate a
    DatasetSeries object.
    Parameters
    ----------
    parameter_filename : str
The simulation parameter file.
find_outputs : bool
If True, subdirectories within the GlobalDir directory are
searched one by one for datasets. Time and redshift
information are gathered by temporarily instantiating each
dataset. This can be used when simulation data was created
in a non-standard way, making it difficult to guess the
corresponding time and redshift information.
Default: False.
Examples
--------
>>> import yt
>>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
>>> es.get_time_series()
>>> for ds in es:
... print(ds.current_time)
"""
def __init__(self, parameter_filename, find_outputs=False):
self.simulation_type = "grid"
self.key_parameters = ["stop_cycle"]
SimulationTimeSeries.__init__(
self, parameter_filename, find_outputs=find_outputs
)
def _set_units(self):
self.unit_registry = UnitRegistry()
self.unit_registry.add("code_time", 1.0, dimensions.time)
self.unit_registry.add("code_length", 1.0, dimensions.length)
if self.cosmological_simulation:
# Instantiate EnzoCosmology object for units and time conversions.
self.cosmology = EnzoCosmology(
self.parameters["CosmologyHubbleConstantNow"],
self.parameters["CosmologyOmegaMatterNow"],
self.parameters["CosmologyOmegaLambdaNow"],
self.parameters.get("CosmologyOmegaRadiationNow", 0.0),
0.0,
self.parameters["CosmologyInitialRedshift"],
unit_registry=self.unit_registry,
)
self.time_unit = self.cosmology.time_unit.in_units("s")
if "h" in self.unit_registry:
self.unit_registry.modify("h", self.hubble_constant)
else:
self.unit_registry.add(
"h", self.hubble_constant, dimensions.dimensionless
)
# Comoving lengths
for my_unit in ["m", "pc", "AU"]:
new_unit = f"{my_unit}cm"
# technically not true, but should be ok
self.unit_registry.add(
new_unit,
self.unit_registry.lut[my_unit][0],
dimensions.length,
"\\rm{%s}/(1+z)" % my_unit,
prefixable=True,
)
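            # e.g. this registers "mcm", "pccm", and "AUcm"; with prefixable=True,
            # prefixed forms such as "Mpccm" also resolve, which is what lets the
            # box size below be expressed in "Mpccm / h".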
self.length_unit = self.quan(
self.box_size, "Mpccm / h", registry=self.unit_registry
)
else:
self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
self.length_unit = self.quan(self.parameters["LengthUnits"], "cm")
self.box_size = self.length_unit
self.domain_left_edge = self.domain_left_edge * self.length_unit
self.domain_right_edge = self.domain_right_edge * self.length_unit
self.unit_registry.modify("code_time", self.time_unit)
self.unit_registry.modify("code_length", self.length_unit)
self.unit_registry.add(
"unitary", float(self.box_size.in_base()), self.length_unit.units.dimensions
)
def get_time_series(
self,
time_data=True,
redshift_data=True,
initial_time=None,
final_time=None,
initial_redshift=None,
final_redshift=None,
initial_cycle=None,
final_cycle=None,
times=None,
redshifts=None,
tolerance=None,
parallel=True,
setup_function=None,
):
"""
Instantiate a DatasetSeries object for a set of outputs.
        If no additional keywords are given, a DatasetSeries object will be
        created with all potential datasets created by the simulation.
        Outputs can be gathered by specifying a time or redshift range
(or combination of time and redshift), with a specific list of
times or redshifts, a range of cycle numbers (for cycle based
output), or by simply searching all subdirectories within the
simulation directory.
        Parameters
        ----------
        time_data : bool
Whether or not to include time outputs when gathering
datasets for time series.
Default: True.
redshift_data : bool
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
initial_time : tuple of type (float, str)
The earliest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (5.0, "Gyr"). If None, the initial time of the
simulation is used. This can be used in combination with
either final_time or final_redshift.
Default: None.
final_time : tuple of type (float, str)
The latest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (13.7, "Gyr"). If None, the final time of the
simulation is used. This can be used in combination with either
initial_time or initial_redshift.
Default: None.
times : tuple of type (float array, str)
A list of times for which outputs will be found and the units
of those values. For example, ([0, 1, 2, 3], "s").
Default: None.
initial_redshift : float
The earliest redshift for outputs to be included. If None,
the initial redshift of the simulation is used. This can be
used in combination with either final_time or
final_redshift.
Default: None.
final_redshift : float
The latest redshift for outputs to be included. If None,
the final redshift of the simulation is used. This can be
used in combination with either initial_time or
initial_redshift.
Default: None.
redshifts : array_like
A list of redshifts for which outputs will be found.
Default: None.
initial_cycle : float
The earliest cycle for outputs to be included. If None,
the initial cycle of the simulation is used. This can
only be used with final_cycle.
Default: None.
final_cycle : float
The latest cycle for outputs to be included. If None,
the final cycle of the simulation is used. This can
only be used in combination with initial_cycle.
Default: None.
tolerance : float
Used in combination with "times" or "redshifts" keywords,
this is the tolerance within which outputs are accepted
given the requested times or redshifts. If None, the
nearest output is always taken.
Default: None.
parallel : bool/int
If True, the generated DatasetSeries will divide the work
such that a single processor works on each dataset. If an
integer is supplied, the work will be divided into that
number of jobs.
Default: True.
setup_function : callable, accepts a ds
This function will be called whenever a dataset is loaded.
Examples
--------
>>> import yt
>>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
>>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
redshift_data=False)
>>> for ds in es:
... print(ds.current_time)
>>> es.get_time_series(redshifts=[3, 2, 1, 0])
>>> for ds in es:
... print(ds.current_time)
"""
if (
initial_redshift is not None or final_redshift is not None
) and not self.cosmological_simulation:
raise InvalidSimulationTimeSeries(
"An initial or final redshift has been given for a "
+ "noncosmological simulation."
)
if time_data and redshift_data:
my_all_outputs = self.all_outputs
elif time_data:
my_all_outputs = self.all_time_outputs
elif redshift_data:
my_all_outputs = self.all_redshift_outputs
else:
raise InvalidSimulationTimeSeries(
"Both time_data and redshift_data are False."
)
if not my_all_outputs:
DatasetSeries.__init__(self, outputs=[], parallel=parallel)
mylog.info("0 outputs loaded into time series.")
return
# Apply selection criteria to the set.
if times is not None:
my_outputs = self._get_outputs_by_key(
"time", times, tolerance=tolerance, outputs=my_all_outputs
)
elif redshifts is not None:
my_outputs = self._get_outputs_by_key(
"redshift", redshifts, tolerance=tolerance, outputs=my_all_outputs
)
elif initial_cycle is not None or final_cycle is not None:
if initial_cycle is None:
initial_cycle = 0
else:
initial_cycle = max(initial_cycle, 0)
if final_cycle is None:
final_cycle = self.parameters["StopCycle"]
else:
final_cycle = min(final_cycle, self.parameters["StopCycle"])
            my_outputs = my_all_outputs[
                int(
                    np.ceil(float(initial_cycle) / self.parameters["CycleSkipDataDump"])
                ) : int(final_cycle / self.parameters["CycleSkipDataDump"])
                + 1
            ]
else:
if initial_time is not None:
                if isinstance(initial_time, float):
                    my_initial_time = self.quan(initial_time, "code_time")
                elif isinstance(initial_time, tuple) and len(initial_time) == 2:
                    my_initial_time = self.quan(*initial_time)
                elif isinstance(initial_time, unyt_array):
                    my_initial_time = initial_time
                else:
                    raise RuntimeError(
                        "Error: initial_time must be given as a float or "
                        "tuple of (value, units)."
                    )
elif initial_redshift is not None:
my_initial_time = self.cosmology.t_from_z(initial_redshift)
else:
my_initial_time = self.initial_time
if final_time is not None:
                if isinstance(final_time, float):
                    my_final_time = self.quan(final_time, "code_time")
                elif isinstance(final_time, tuple) and len(final_time) == 2:
                    my_final_time = self.quan(*final_time)
                elif isinstance(final_time, unyt_array):
                    my_final_time = final_time
                else:
                    raise RuntimeError(
                        "Error: final_time must be given as a float or "
                        "tuple of (value, units)."
                    )
elif final_redshift is not None:
my_final_time = self.cosmology.t_from_z(final_redshift)
else:
my_final_time = self.final_time
my_initial_time.convert_to_units("s")
my_final_time.convert_to_units("s")
my_times = np.array([a["time"] for a in my_all_outputs])
my_indices = np.digitize([my_initial_time, my_final_time], my_times)
if my_initial_time == my_times[my_indices[0] - 1]:
my_indices[0] -= 1
my_outputs = my_all_outputs[my_indices[0] : my_indices[1]]
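            # Boundary handling above, with illustrative values: for
            # my_times = [1, 2, 3] (in s), np.digitize([1, 3], my_times) returns
            # [1, 3]; the first index is then decremented to 0, so an
            # initial_time exactly equal to an output time still includes that
            # output in the slice.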
init_outputs = []
for output in my_outputs:
if os.path.exists(output["filename"]):
init_outputs.append(output["filename"])
DatasetSeries.__init__(
self, outputs=init_outputs, parallel=parallel, setup_function=setup_function
)
mylog.info("%d outputs loaded into time series.", len(init_outputs))
def _parse_parameter_file(self):
"""
Parses the parameter file and establishes the various
dictionaries.
"""
self.conversion_factors = {}
redshift_outputs = []
# Let's read the file
lines = open(self.parameter_filename).readlines()
for line in (l.strip() for l in lines):
if "#" in line:
line = line[0 : line.find("#")]
if "//" in line:
line = line[0 : line.find("//")]
if len(line) < 2:
continue
param, vals = (i.strip() for i in line.split("=", 1))
# First we try to decipher what type of value it is.
vals = vals.split()
            # Special case: drop trailing "(do not change)" style annotations.
            if "(do" in vals:
                vals = vals[:1]
if len(vals) == 0:
pcast = str # Assume NULL output
else:
v = vals[0]
# Figure out if it's castable to floating point:
try:
float(v)
except ValueError:
pcast = str
else:
if any("." in v or "e" in v for v in vals):
pcast = float
elif v == "inf":
pcast = str
else:
pcast = int
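            # e.g. "StopCycle = 300" -> int, "StopTime = 2.5" -> float,
            # "InitialDt = 1e-3" -> float, "GlobalDir = /data/run1" -> str
            # (hypothetical parameter lines, shown only to illustrate the casts).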
# Now we figure out what to do with it.
if param.endswith("Units") and not param.startswith("Temperature"):
dataType = param[:-5]
# This one better be a float.
self.conversion_factors[dataType] = float(vals[0])
if param.startswith("CosmologyOutputRedshift["):
index = param[param.find("[") + 1 : param.find("]")]
redshift_outputs.append(
{"index": int(index), "redshift": float(vals[0])}
)
elif len(vals) == 0:
vals = ""
elif len(vals) == 1:
vals = pcast(vals[0])
else:
vals = np.array([pcast(i) for i in vals if i != "-99999"])
self.parameters[param] = vals
self.refine_by = self.parameters["RefineBy"]
self.dimensionality = self.parameters["TopGridRank"]
if self.dimensionality > 1:
self.domain_dimensions = self.parameters["TopGridDimensions"]
if len(self.domain_dimensions) < 3:
tmp = self.domain_dimensions.tolist()
tmp.append(1)
self.domain_dimensions = np.array(tmp)
self.domain_left_edge = np.array(
self.parameters["DomainLeftEdge"], "float64"
).copy()
self.domain_right_edge = np.array(
self.parameters["DomainRightEdge"], "float64"
).copy()
else:
self.domain_left_edge = np.array(
self.parameters["DomainLeftEdge"], "float64"
)
self.domain_right_edge = np.array(
self.parameters["DomainRightEdge"], "float64"
)
self.domain_dimensions = np.array(
[self.parameters["TopGridDimensions"], 1, 1]
)
if self.parameters["ComovingCoordinates"]:
cosmo_attr = {
"box_size": "CosmologyComovingBoxSize",
"omega_lambda": "CosmologyOmegaLambdaNow",
"omega_matter": "CosmologyOmegaMatterNow",
"omega_radiation": "CosmologyOmegaRadiationNow",
"hubble_constant": "CosmologyHubbleConstantNow",
"initial_redshift": "CosmologyInitialRedshift",
"final_redshift": "CosmologyFinalRedshift",
}
self.cosmological_simulation = 1
for a, v in cosmo_attr.items():
if v not in self.parameters:
raise MissingParameter(self.parameter_filename, v)
setattr(self, a, self.parameters[v])
else:
self.cosmological_simulation = 0
self.omega_lambda = self.omega_matter = self.hubble_constant = 0.0
# make list of redshift outputs
self.all_redshift_outputs = []
if not self.cosmological_simulation:
return
for output in redshift_outputs:
output["filename"] = os.path.join(
self.parameters["GlobalDir"],
"%s%04d" % (self.parameters["RedshiftDumpDir"], output["index"]),
"%s%04d" % (self.parameters["RedshiftDumpName"], output["index"]),
)
del output["index"]
self.all_redshift_outputs = redshift_outputs
def _calculate_time_outputs(self):
"""
Calculate time outputs and their redshifts if cosmological.
"""
self.all_time_outputs = []
if (
self.final_time is None
or "dtDataDump" not in self.parameters
or self.parameters["dtDataDump"] <= 0.0
):
return []
index = 0
current_time = self.initial_time.copy()
dt_datadump = self.quan(self.parameters["dtDataDump"], "code_time")
while current_time <= self.final_time + dt_datadump:
filename = os.path.join(
self.parameters["GlobalDir"],
"%s%04d" % (self.parameters["DataDumpDir"], index),
"%s%04d" % (self.parameters["DataDumpName"], index),
)
output = {"index": index, "filename": filename, "time": current_time.copy()}
output["time"] = min(output["time"], self.final_time)
if self.cosmological_simulation:
output["redshift"] = self.cosmology.z_from_t(current_time)
self.all_time_outputs.append(output)
if np.abs(self.final_time - current_time) / self.final_time < 1e-4:
break
current_time += dt_datadump
index += 1
def _calculate_cycle_outputs(self):
"""
Calculate cycle outputs.
"""
mylog.warning("Calculating cycle outputs. Dataset times will be unavailable.")
if (
self.stop_cycle is None
or "CycleSkipDataDump" not in self.parameters
or self.parameters["CycleSkipDataDump"] <= 0.0
):
return []
self.all_time_outputs = []
index = 0
for cycle in range(
0, self.stop_cycle + 1, self.parameters["CycleSkipDataDump"]
):
filename = os.path.join(
self.parameters["GlobalDir"],
"%s%04d" % (self.parameters["DataDumpDir"], index),
"%s%04d" % (self.parameters["DataDumpName"], index),
)
output = {"index": index, "filename": filename, "cycle": cycle}
self.all_time_outputs.append(output)
index += 1
def _get_all_outputs(self, find_outputs=False):
"""
Get all potential datasets and combine into a time-sorted list.
"""
# Create the set of outputs from which further selection will be done.
if find_outputs:
self._find_outputs()
elif (
self.parameters["dtDataDump"] > 0
and self.parameters["CycleSkipDataDump"] > 0
):
mylog.info(
"Simulation %s has both dtDataDump and CycleSkipDataDump set.",
self.parameter_filename,
)
mylog.info(
" Unable to calculate datasets. "
"Attempting to search in the current directory"
)
self._find_outputs()
else:
# Get all time or cycle outputs.
if self.parameters["CycleSkipDataDump"] > 0:
self._calculate_cycle_outputs()
else:
self._calculate_time_outputs()
# Calculate times for redshift outputs.
if self.cosmological_simulation:
for output in self.all_redshift_outputs:
output["time"] = self.cosmology.t_from_z(output["redshift"])
self.all_redshift_outputs.sort(key=lambda obj: obj["time"])
self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
if self.parameters["CycleSkipDataDump"] <= 0:
self.all_outputs.sort(key=lambda obj: obj["time"].to_ndarray())
def _calculate_simulation_bounds(self):
"""
Figure out the starting and stopping time and redshift for the simulation.
"""
if "StopCycle" in self.parameters:
self.stop_cycle = self.parameters["StopCycle"]
# Convert initial/final redshifts to times.
if self.cosmological_simulation:
self.initial_time = self.cosmology.t_from_z(self.initial_redshift)
self.initial_time.units.registry = self.unit_registry
self.final_time = self.cosmology.t_from_z(self.final_redshift)
self.final_time.units.registry = self.unit_registry
# If not a cosmology simulation, figure out the stopping criteria.
else:
if "InitialTime" in self.parameters:
self.initial_time = self.quan(
self.parameters["InitialTime"], "code_time"
)
else:
self.initial_time = self.quan(0.0, "code_time")
if "StopTime" in self.parameters:
self.final_time = self.quan(self.parameters["StopTime"], "code_time")
else:
self.final_time = None
if not ("StopTime" in self.parameters or "StopCycle" in self.parameters):
raise NoStoppingCondition(self.parameter_filename)
if self.final_time is None:
mylog.warning(
"Simulation %s has no stop time set, stopping condition "
"will be based only on cycles.",
self.parameter_filename,
)
def _set_parameter_defaults(self):
"""
Set some default parameters to avoid problems
if they are not in the parameter file.
"""
self.parameters["GlobalDir"] = self.directory
self.parameters["DataDumpName"] = "data"
self.parameters["DataDumpDir"] = "DD"
self.parameters["RedshiftDumpName"] = "RedshiftOutput"
self.parameters["RedshiftDumpDir"] = "RD"
self.parameters["ComovingCoordinates"] = 0
self.parameters["TopGridRank"] = 3
self.parameters["DomainLeftEdge"] = np.zeros(self.parameters["TopGridRank"])
self.parameters["DomainRightEdge"] = np.ones(self.parameters["TopGridRank"])
self.parameters["RefineBy"] = 2 # technically not the enzo default
self.parameters["StopCycle"] = 100000
self.parameters["dtDataDump"] = 0.0
self.parameters["CycleSkipDataDump"] = 0.0
self.parameters["LengthUnits"] = 1.0
self.parameters["TimeUnits"] = 1.0
self.parameters["CosmologyOmegaRadiationNow"] = 0.0
def _find_outputs(self):
"""
Search for directories matching the data dump keywords.
        If found, get dataset times by opening the ds.
"""
# look for time outputs.
potential_time_outputs = glob.glob(
os.path.join(
self.parameters["GlobalDir"], f"{self.parameters["DataDumpDir"]}*"
)
)
self.all_time_outputs = self._check_for_outputs(potential_time_outputs)
self.all_time_outputs.sort(key=lambda obj: obj["time"])
# look for redshift outputs.
potential_redshift_outputs = glob.glob(
os.path.join(
self.parameters["GlobalDir"], f"{self.parameters["RedshiftDumpDir"]}*"
)
)
self.all_redshift_outputs = self._check_for_outputs(potential_redshift_outputs)
self.all_redshift_outputs.sort(key=lambda obj: obj["time"])
self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
self.all_outputs.sort(key=lambda obj: obj["time"])
only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))
# manually set final time and redshift with last output
if self.all_outputs:
self.final_time = self.all_outputs[-1]["time"]
if self.cosmological_simulation:
self.final_redshift = self.all_outputs[-1]["redshift"]
def _check_for_outputs(self, potential_outputs):
"""
Check a list of files to see if they are valid datasets.
"""
only_on_root(
mylog.info, "Checking %d potential outputs.", len(potential_outputs)
)
my_outputs = {}
llevel = mylog.level
# suppress logging as we load every dataset, unless set to debug
        if 10 < llevel < 40:
mylog.setLevel(40)
for my_storage, output in parallel_objects(
potential_outputs, storage=my_outputs
):
if self.parameters["DataDumpDir"] in output:
dir_key = self.parameters["DataDumpDir"]
output_key = self.parameters["DataDumpName"]
else:
dir_key = self.parameters["RedshiftDumpDir"]
output_key = self.parameters["RedshiftDumpName"]
index = output[output.find(dir_key) + len(dir_key) :]
filename = os.path.join(
self.parameters["GlobalDir"],
f"{dir_key}{index}",
f"{output_key}{index}",
)
try:
ds = load(filename)
except (FileNotFoundError, YTUnidentifiedDataType):
mylog.error("Failed to load %s", filename)
continue
my_storage.result = {
"filename": filename,
"time": ds.current_time.in_units("s"),
}
if ds.cosmological_simulation:
my_storage.result["redshift"] = ds.current_redshift
mylog.setLevel(llevel)
my_outputs = [
my_output for my_output in my_outputs.values() if my_output is not None
]
return my_outputs
def _write_cosmology_outputs(self, filename, outputs, start_index, decimals=3):
"""
Write cosmology output parameters for a cosmology splice.
"""
mylog.info("Writing redshift output list to %s.", filename)
        with open(filename, "w") as f:
            for q, output in enumerate(outputs):
                f.write(
                    f"CosmologyOutputRedshift[{q + start_index}] = "
                    f"{output['redshift']:.{decimals}f}\n"
                )
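        # e.g. with start_index=0 and decimals=3, an output at z=3 is written as
        # "CosmologyOutputRedshift[0] = 3.000", the same form that
        # _parse_parameter_file reads back in.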
class EnzoCosmology(Cosmology):
def __init__(
self,
hubble_constant,
omega_matter,
omega_lambda,
omega_radiation,
omega_curvature,
initial_redshift,
unit_registry=None,
):
Cosmology.__init__(
self,
hubble_constant=hubble_constant,
omega_matter=omega_matter,
omega_lambda=omega_lambda,
omega_radiation=omega_radiation,
omega_curvature=omega_curvature,
unit_registry=unit_registry,
)
self.initial_redshift = initial_redshift
self.initial_time = self.t_from_z(self.initial_redshift)
# time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),
# rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)
self.time_unit = (
(
1.5
* self.omega_matter
* self.hubble_constant ** 2
* (1 + self.initial_redshift) ** 3
)
** -0.5
).in_units("s")
self.time_unit.units.registry = self.unit_registry
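# Derivation note for the time unit above: substituting
#   rho_0 = 3 * omega_matter * hubble_constant**2 / (8 * pi * G)
# into 1 / sqrt(4 * pi * G * rho_0 * (1 + z_i)**3) gives
#   4 * pi * G * rho_0 = (3 / 2) * omega_matter * hubble_constant**2,
# so time_unit = (1.5 * omega_matter * hubble_constant**2 * (1 + z_i)**3) ** -0.5,
# exactly the expression computed in EnzoCosmology.__init__.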
| import glob
import os
import numpy as np
from unyt import dimensions, unyt_array
from unyt.unit_registry import UnitRegistry
from yt.data_objects.time_series import DatasetSeries, SimulationTimeSeries
from yt.funcs import only_on_root
from yt.loaders import load
from yt.utilities.cosmology import Cosmology
from yt.utilities.exceptions import (
InvalidSimulationTimeSeries,
MissingParameter,
NoStoppingCondition,
YTUnidentifiedDataType,
)
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects
class EnzoSimulation(SimulationTimeSeries):
r"""
Initialize an Enzo Simulation object.
Upon creation, the parameter file is parsed and the time and redshift
are calculated and stored in all_outputs. A time units dictionary is
instantiated to allow for time outputs to be requested with physical
time units. The get_time_series can be used to generate a
DatasetSeries object.
parameter_filename : str
The simulation parameter file.
find_outputs : bool
If True, subdirectories within the GlobalDir directory are
searched one by one for datasets. Time and redshift
information are gathered by temporarily instantiating each
dataset. This can be used when simulation data was created
in a non-standard way, making it difficult to guess the
corresponding time and redshift information.
Default: False.
Examples
--------
>>> import yt
>>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
>>> es.get_time_series()
>>> for ds in es:
... print(ds.current_time)
"""
def __init__(self, parameter_filename, find_outputs=False):
self.simulation_type = "grid"
self.key_parameters = ["stop_cycle"]
SimulationTimeSeries.__init__(
self, parameter_filename, find_outputs=find_outputs
)
def _set_units(self):
self.unit_registry = UnitRegistry()
self.unit_registry.add("code_time", 1.0, dimensions.time)
self.unit_registry.add("code_length", 1.0, dimensions.length)
if self.cosmological_simulation:
# Instantiate EnzoCosmology object for units and time conversions.
self.cosmology = EnzoCosmology(
self.parameters["CosmologyHubbleConstantNow"],
self.parameters["CosmologyOmegaMatterNow"],
self.parameters["CosmologyOmegaLambdaNow"],
self.parameters.get("CosmologyOmegaRadiationNow", 0.0),
0.0,
self.parameters["CosmologyInitialRedshift"],
unit_registry=self.unit_registry,
)
self.time_unit = self.cosmology.time_unit.in_units("s")
if "h" in self.unit_registry:
self.unit_registry.modify("h", self.hubble_constant)
else:
self.unit_registry.add(
"h", self.hubble_constant, dimensions.dimensionless
)
# Comoving lengths
for my_unit in ["m", "pc", "AU"]:
new_unit = f"{my_unit}cm"
# technically not true, but should be ok
self.unit_registry.add(
new_unit,
self.unit_registry.lut[my_unit][0],
dimensions.length,
"\\rm{%s}/(1+z)" % my_unit,
prefixable=True,
)
self.length_unit = self.quan(
self.box_size, "Mpccm / h", registry=self.unit_registry
)
else:
self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
self.length_unit = self.quan(self.parameters["LengthUnits"], "cm")
self.box_size = self.length_unit
self.domain_left_edge = self.domain_left_edge * self.length_unit
self.domain_right_edge = self.domain_right_edge * self.length_unit
self.unit_registry.modify("code_time", self.time_unit)
self.unit_registry.modify("code_length", self.length_unit)
self.unit_registry.add(
"unitary", float(self.box_size.in_base()), self.length_unit.units.dimensions
)
def get_time_series(
self,
time_data=True,
redshift_data=True,
initial_time=None,
final_time=None,
initial_redshift=None,
final_redshift=None,
initial_cycle=None,
final_cycle=None,
times=None,
redshifts=None,
tolerance=None,
parallel=True,
setup_function=None,
):
"""
Instantiate a DatasetSeries object for a set of outputs.
If no additional keywords given, a DatasetSeries object will be
created with all potential datasets created by the simulation.
Outputs can be gather by specifying a time or redshift range
(or combination of time and redshift), with a specific list of
times or redshifts, a range of cycle numbers (for cycle based
output), or by simply searching all subdirectories within the
simulation directory.
time_data : bool
Whether or not to include time outputs when gathering
datasets for time series.
Default: True.
redshift_data : bool
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
initial_time : tuple of type (float, str)
The earliest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (5.0, "Gyr"). If None, the initial time of the
simulation is used. This can be used in combination with
either final_time or final_redshift.
Default: None.
final_time : tuple of type (float, str)
The latest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (13.7, "Gyr"). If None, the final time of the
simulation is used. This can be used in combination with either
initial_time or initial_redshift.
Default: None.
times : tuple of type (float array, str)
A list of times for which outputs will be found and the units
of those values. For example, ([0, 1, 2, 3], "s").
Default: None.
initial_redshift : float
The earliest redshift for outputs to be included. If None,
the initial redshift of the simulation is used. This can be
used in combination with either final_time or
final_redshift.
Default: None.
final_redshift : float
The latest redshift for outputs to be included. If None,
the final redshift of the simulation is used. This can be
used in combination with either initial_time or
initial_redshift.
Default: None.
redshifts : array_like
A list of redshifts for which outputs will be found.
Default: None.
initial_cycle : float
The earliest cycle for outputs to be included. If None,
the initial cycle of the simulation is used. This can
only be used with final_cycle.
Default: None.
final_cycle : float
The latest cycle for outputs to be included. If None,
the final cycle of the simulation is used. This can
only be used in combination with initial_cycle.
Default: None.
tolerance : float
Used in combination with "times" or "redshifts" keywords,
this is the tolerance within which outputs are accepted
given the requested times or redshifts. If None, the
nearest output is always taken.
Default: None.
parallel : bool/int
If True, the generated DatasetSeries will divide the work
such that a single processor works on each dataset. If an
integer is supplied, the work will be divided into that
number of jobs.
Default: True.
setup_function : callable, accepts a ds
This function will be called whenever a dataset is loaded.
Examples
--------
>>> import yt
>>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
>>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
redshift_data=False)
>>> for ds in es:
... print(ds.current_time)
>>> es.get_time_series(redshifts=[3, 2, 1, 0])
>>> for ds in es:
... print(ds.current_time)
"""
if (
initial_redshift is not None or final_redshift is not None
) and not self.cosmological_simulation:
raise InvalidSimulationTimeSeries(
"An initial or final redshift has been given for a "
+ "noncosmological simulation."
)
if time_data and redshift_data:
my_all_outputs = self.all_outputs
elif time_data:
my_all_outputs = self.all_time_outputs
elif redshift_data:
my_all_outputs = self.all_redshift_outputs
else:
raise InvalidSimulationTimeSeries(
"Both time_data and redshift_data are False."
)
if not my_all_outputs:
DatasetSeries.__init__(self, outputs=[], parallel=parallel)
mylog.info("0 outputs loaded into time series.")
return
# Apply selection criteria to the set.
if times is not None:
my_outputs = self._get_outputs_by_key(
"time", times, tolerance=tolerance, outputs=my_all_outputs
)
elif redshifts is not None:
my_outputs = self._get_outputs_by_key(
"redshift", redshifts, tolerance=tolerance, outputs=my_all_outputs
)
elif initial_cycle is not None or final_cycle is not None:
if initial_cycle is None:
initial_cycle = 0
else:
initial_cycle = max(initial_cycle, 0)
if final_cycle is None:
final_cycle = self.parameters["StopCycle"]
else:
final_cycle = min(final_cycle, self.parameters["StopCycle"])
my_outputs = my_all_outputs[
int(
np.ceil(float(initial_cycle) / self.parameters["CycleSkipDataDump"])
) : (final_cycle / self.parameters["CycleSkipDataDump"])
+ 1
]
else:
if initial_time is not None:
if isinstance(initial_time, float):
my_initial_time = self.quan(initial_time, "code_time")
elif isinstance(initial_time, tuple) and len(initial_time) == 2:
my_initial_time = self.quan(*initial_time)
elif not isinstance(initial_time, unyt_array):
raise RuntimeError(
"Error: initial_time must be given as a float or "
+ "tuple of (value, units)."
)
elif initial_redshift is not None:
my_initial_time = self.cosmology.t_from_z(initial_redshift)
else:
my_initial_time = self.initial_time
if final_time is not None:
if isinstance(final_time, float):
my_final_time = self.quan(final_time, "code_time")
elif isinstance(final_time, tuple) and len(final_time) == 2:
my_final_time = self.quan(*final_time)
elif not isinstance(final_time, unyt_array):
raise RuntimeError(
"Error: final_time must be given as a float or "
+ "tuple of (value, units)."
)
elif final_redshift is not None:
my_final_time = self.cosmology.t_from_z(final_redshift)
else:
my_final_time = self.final_time
my_initial_time.convert_to_units("s")
my_final_time.convert_to_units("s")
my_times = np.array([a["time"] for a in my_all_outputs])
my_indices = np.digitize([my_initial_time, my_final_time], my_times)
if my_initial_time == my_times[my_indices[0] - 1]:
my_indices[0] -= 1
my_outputs = my_all_outputs[my_indices[0] : my_indices[1]]
init_outputs = []
for output in my_outputs:
if os.path.exists(output["filename"]):
init_outputs.append(output["filename"])
DatasetSeries.__init__(
self, outputs=init_outputs, parallel=parallel, setup_function=setup_function
)
mylog.info("%d outputs loaded into time series.", len(init_outputs))
def _parse_parameter_file(self):
"""
Parses the parameter file and establishes the various
dictionaries.
"""
self.conversion_factors = {}
redshift_outputs = []
# Let's read the file
lines = open(self.parameter_filename).readlines()
for line in (l.strip() for l in lines):
if "#" in line:
line = line[0 : line.find("#")]
if "//" in line:
line = line[0 : line.find("//")]
if len(line) < 2:
continue
param, vals = (i.strip() for i in line.split("=", 1))
# First we try to decipher what type of value it is.
vals = vals.split()
# Special case approaching.
if "(do" in vals:
vals = vals[:1]
if len(vals) == 0:
pcast = str # Assume NULL output
else:
v = vals[0]
# Figure out if it's castable to floating point:
try:
float(v)
except ValueError:
pcast = str
else:
if any("." in v or "e" in v for v in vals):
pcast = float
elif v == "inf":
pcast = str
else:
pcast = int
# Now we figure out what to do with it.
if param.endswith("Units") and not param.startswith("Temperature"):
dataType = param[:-5]
# This one better be a float.
self.conversion_factors[dataType] = float(vals[0])
if param.startswith("CosmologyOutputRedshift["):
index = param[param.find("[") + 1 : param.find("]")]
redshift_outputs.append(
{"index": int(index), "redshift": float(vals[0])}
)
elif len(vals) == 0:
vals = ""
elif len(vals) == 1:
vals = pcast(vals[0])
else:
vals = np.array([pcast(i) for i in vals if i != "-99999"])
self.parameters[param] = vals
self.refine_by = self.parameters["RefineBy"]
self.dimensionality = self.parameters["TopGridRank"]
if self.dimensionality > 1:
self.domain_dimensions = self.parameters["TopGridDimensions"]
if len(self.domain_dimensions) < 3:
tmp = self.domain_dimensions.tolist()
tmp.append(1)
self.domain_dimensions = np.array(tmp)
self.domain_left_edge = np.array(
self.parameters["DomainLeftEdge"], "float64"
).copy()
self.domain_right_edge = np.array(
self.parameters["DomainRightEdge"], "float64"
).copy()
else:
self.domain_left_edge = np.array(
self.parameters["DomainLeftEdge"], "float64"
)
self.domain_right_edge = np.array(
self.parameters["DomainRightEdge"], "float64"
)
self.domain_dimensions = np.array(
[self.parameters["TopGridDimensions"], 1, 1]
)
if self.parameters["ComovingCoordinates"]:
cosmo_attr = {
"box_size": "CosmologyComovingBoxSize",
"omega_lambda": "CosmologyOmegaLambdaNow",
"omega_matter": "CosmologyOmegaMatterNow",
"omega_radiation": "CosmologyOmegaRadiationNow",
"hubble_constant": "CosmologyHubbleConstantNow",
"initial_redshift": "CosmologyInitialRedshift",
"final_redshift": "CosmologyFinalRedshift",
}
self.cosmological_simulation = 1
for a, v in cosmo_attr.items():
if v not in self.parameters:
raise MissingParameter(self.parameter_filename, v)
setattr(self, a, self.parameters[v])
else:
self.cosmological_simulation = 0
self.omega_lambda = self.omega_matter = self.hubble_constant = 0.0
# make list of redshift outputs
self.all_redshift_outputs = []
if not self.cosmological_simulation:
return
for output in redshift_outputs:
output["filename"] = os.path.join(
self.parameters["GlobalDir"],
"%s%04d" % (self.parameters["RedshiftDumpDir"], output["index"]),
"%s%04d" % (self.parameters["RedshiftDumpName"], output["index"]),
)
del output["index"]
self.all_redshift_outputs = redshift_outputs
def _calculate_time_outputs(self):
"""
Calculate time outputs and their redshifts if cosmological.
"""
self.all_time_outputs = []
if (
self.final_time is None
or "dtDataDump" not in self.parameters
or self.parameters["dtDataDump"] <= 0.0
):
return []
index = 0
current_time = self.initial_time.copy()
dt_datadump = self.quan(self.parameters["dtDataDump"], "code_time")
while current_time <= self.final_time + dt_datadump:
filename = os.path.join(
self.parameters["GlobalDir"],
"%s%04d" % (self.parameters["DataDumpDir"], index),
"%s%04d" % (self.parameters["DataDumpName"], index),
)
output = {"index": index, "filename": filename, "time": current_time.copy()}
output["time"] = min(output["time"], self.final_time)
if self.cosmological_simulation:
output["redshift"] = self.cosmology.z_from_t(current_time)
self.all_time_outputs.append(output)
if np.abs(self.final_time - current_time) / self.final_time < 1e-4:
break
current_time += dt_datadump
index += 1
def _calculate_cycle_outputs(self):
"""
Calculate cycle outputs.
"""
mylog.warning("Calculating cycle outputs. Dataset times will be unavailable.")
if (
self.stop_cycle is None
or "CycleSkipDataDump" not in self.parameters
or self.parameters["CycleSkipDataDump"] <= 0.0
):
return []
self.all_time_outputs = []
index = 0
for cycle in range(
0, self.stop_cycle + 1, self.parameters["CycleSkipDataDump"]
):
filename = os.path.join(
self.parameters["GlobalDir"],
"%s%04d" % (self.parameters["DataDumpDir"], index),
"%s%04d" % (self.parameters["DataDumpName"], index),
)
output = {"index": index, "filename": filename, "cycle": cycle}
self.all_time_outputs.append(output)
index += 1
def _get_all_outputs(self, find_outputs=False):
"""
Get all potential datasets and combine into a time-sorted list.
"""
# Create the set of outputs from which further selection will be done.
if find_outputs:
self._find_outputs()
elif (
self.parameters["dtDataDump"] > 0
and self.parameters["CycleSkipDataDump"] > 0
):
mylog.info(
"Simulation %s has both dtDataDump and CycleSkipDataDump set.",
self.parameter_filename,
)
mylog.info(
" Unable to calculate datasets. "
"Attempting to search in the current directory"
)
self._find_outputs()
else:
# Get all time or cycle outputs.
if self.parameters["CycleSkipDataDump"] > 0:
self._calculate_cycle_outputs()
else:
self._calculate_time_outputs()
# Calculate times for redshift outputs.
if self.cosmological_simulation:
for output in self.all_redshift_outputs:
output["time"] = self.cosmology.t_from_z(output["redshift"])
self.all_redshift_outputs.sort(key=lambda obj: obj["time"])
self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
if self.parameters["CycleSkipDataDump"] <= 0:
self.all_outputs.sort(key=lambda obj: obj["time"].to_ndarray())
def _calculate_simulation_bounds(self):
"""
Figure out the starting and stopping time and redshift for the simulation.
"""
if "StopCycle" in self.parameters:
self.stop_cycle = self.parameters["StopCycle"]
# Convert initial/final redshifts to times.
if self.cosmological_simulation:
self.initial_time = self.cosmology.t_from_z(self.initial_redshift)
self.initial_time.units.registry = self.unit_registry
self.final_time = self.cosmology.t_from_z(self.final_redshift)
self.final_time.units.registry = self.unit_registry
# If not a cosmology simulation, figure out the stopping criteria.
else:
if "InitialTime" in self.parameters:
self.initial_time = self.quan(
self.parameters["InitialTime"], "code_time"
)
else:
self.initial_time = self.quan(0.0, "code_time")
if "StopTime" in self.parameters:
self.final_time = self.quan(self.parameters["StopTime"], "code_time")
else:
self.final_time = None
if not ("StopTime" in self.parameters or "StopCycle" in self.parameters):
raise NoStoppingCondition(self.parameter_filename)
if self.final_time is None:
mylog.warning(
"Simulation %s has no stop time set, stopping condition "
"will be based only on cycles.",
self.parameter_filename,
)
def _set_parameter_defaults(self):
"""
Set some default parameters to avoid problems
if they are not in the parameter file.
"""
self.parameters["GlobalDir"] = self.directory
self.parameters["DataDumpName"] = "data"
self.parameters["DataDumpDir"] = "DD"
self.parameters["RedshiftDumpName"] = "RedshiftOutput"
self.parameters["RedshiftDumpDir"] = "RD"
self.parameters["ComovingCoordinates"] = 0
self.parameters["TopGridRank"] = 3
self.parameters["DomainLeftEdge"] = np.zeros(self.parameters["TopGridRank"])
self.parameters["DomainRightEdge"] = np.ones(self.parameters["TopGridRank"])
self.parameters["RefineBy"] = 2 # technically not the enzo default
self.parameters["StopCycle"] = 100000
self.parameters["dtDataDump"] = 0.0
self.parameters["CycleSkipDataDump"] = 0.0
self.parameters["LengthUnits"] = 1.0
self.parameters["TimeUnits"] = 1.0
self.parameters["CosmologyOmegaRadiationNow"] = 0.0
def _find_outputs(self):
"""
Search for directories matching the data dump keywords.
If found, get dataset times py opening the ds.
"""
# look for time outputs.
potential_time_outputs = glob.glob(
os.path.join(
self.parameters["GlobalDir"], f"{self.parameters['DataDumpDir']}*"
)
)
self.all_time_outputs = self._check_for_outputs(potential_time_outputs)
self.all_time_outputs.sort(key=lambda obj: obj["time"])
# look for redshift outputs.
potential_redshift_outputs = glob.glob(
os.path.join(
self.parameters["GlobalDir"], f"{self.parameters['RedshiftDumpDir']}*"
)
)
self.all_redshift_outputs = self._check_for_outputs(potential_redshift_outputs)
self.all_redshift_outputs.sort(key=lambda obj: obj["time"])
self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
self.all_outputs.sort(key=lambda obj: obj["time"])
only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))
# manually set final time and redshift with last output
if self.all_outputs:
self.final_time = self.all_outputs[-1]["time"]
if self.cosmological_simulation:
self.final_redshift = self.all_outputs[-1]["redshift"]
def _check_for_outputs(self, potential_outputs):
"""
Check a list of files to see if they are valid datasets.
"""
only_on_root(
mylog.info, "Checking %d potential outputs.", len(potential_outputs)
)
my_outputs = {}
llevel = mylog.level
# suppress logging as we load every dataset, unless set to debug
if llevel > 10 and llevel < 40:
mylog.setLevel(40)
for my_storage, output in parallel_objects(
potential_outputs, storage=my_outputs
):
if self.parameters["DataDumpDir"] in output:
dir_key = self.parameters["DataDumpDir"]
output_key = self.parameters["DataDumpName"]
else:
dir_key = self.parameters["RedshiftDumpDir"]
output_key = self.parameters["RedshiftDumpName"]
index = output[output.find(dir_key) + len(dir_key) :]
filename = os.path.join(
self.parameters["GlobalDir"],
f"{dir_key}{index}",
f"{output_key}{index}",
)
try:
ds = load(filename)
except (FileNotFoundError, YTUnidentifiedDataType):
mylog.error("Failed to load %s", filename)
continue
my_storage.result = {
"filename": filename,
"time": ds.current_time.in_units("s"),
}
if ds.cosmological_simulation:
my_storage.result["redshift"] = ds.current_redshift
mylog.setLevel(llevel)
my_outputs = [
my_output for my_output in my_outputs.values() if my_output is not None
]
return my_outputs
def _write_cosmology_outputs(self, filename, outputs, start_index, decimals=3):
"""
Write cosmology output parameters for a cosmology splice.
"""
mylog.info("Writing redshift output list to %s.", filename)
f = open(filename, "w")
for q, output in enumerate(outputs):
f.write(
(f"CosmologyOutputRedshift[%d] = %.{decimals}f\n")
% ((q + start_index), output["redshift"])
)
f.close()
class EnzoCosmology(Cosmology):
def __init__(
self,
hubble_constant,
omega_matter,
omega_lambda,
omega_radiation,
omega_curvature,
initial_redshift,
unit_registry=None,
):
Cosmology.__init__(
self,
hubble_constant=hubble_constant,
omega_matter=omega_matter,
omega_lambda=omega_lambda,
omega_radiation=omega_radiation,
omega_curvature=omega_curvature,
unit_registry=unit_registry,
)
self.initial_redshift = initial_redshift
self.initial_time = self.t_from_z(self.initial_redshift)
# time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),
# rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)
self.time_unit = (
(
1.5
* self.omega_matter
* self.hubble_constant ** 2
* (1 + self.initial_redshift) ** 3
)
** -0.5
).in_units("s")
self.time_unit.units.registry = self.unit_registry
|
from discord.ext import commands
import re
import discord
import random
import typing
import emoji
import unicodedata
import textwrap
import contextlib
import io
import asyncio
import async_tio
import itertools
import os
import base64
import secrets
import utils
from difflib import SequenceMatcher
from discord.ext.commands.cooldowns import BucketType
from jishaku.codeblocks import codeblock_converter
import functools
class Info(commands.Cog):
"Gives you Information about data you are allowed to access"
def __init__(self, bot):
self.bot = bot
@commands.command(
help="gives you info about a guild",
aliases=[
"server_info",
"guild_fetch",
"guild_info",
"fetch_guild",
"guildinfo",
],
)
async def serverinfo(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
guild = guild or ctx.guild
if guild is None:
await ctx.send("Guild wanted has not been found")
if guild:
await utils.guildinfo(ctx, guild)
@commands.command(
aliases=["user_info", "user-info", "ui", "whois"],
brief="a command that gives information on users",
help="this can work with mentions, ids, usernames, and even full names.",
)
async def userinfo(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
user_type = "Bot" if user.bot else "User" if isinstance(user, discord.User) else "Member"
statuses = []
badges = [utils.profile_converter("badges", f) for f in user.public_flags.all()] if user.public_flags else []
if user.bot:
badges.append(utils.profile_converter("badges", "bot"))
if user.system:
badges.append(utils.profile_converter("badges", "system"))
if isinstance(user, discord.Member):
nickname = user.nick
joined_guild = f"{discord.utils.format_dt(user.joined_at, style = "d")}\n{discord.utils.format_dt(user.joined_at, style = "T")}"
highest_role = user.top_role
for name, status in (
("Status", user.status),
("Desktop", user.desktop_status),
("Mobile", user.mobile_status),
("Web", user.web_status),
):
statuses.append((name, utils.profile_converter(name.lower(), status)))
else:
nickname = "None Found"
joined_guild = "N/A"
highest_role = "None Found"
member = discord.utils.find(lambda member: member.id == user.id, self.bot.get_all_members())
if member:
for name, status in (
("Status", member.status),
("Desktop", member.desktop_status),
("Mobile", member.mobile_status),
("Web", member.web_status),
):
statuses.append((name, utils.profile_converter(name.lower(), status)))
embed = discord.Embed(title=f"{user}", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)
embed.add_field(
name="User Info: ",
value=f"**Username**: {user.name} \n**Discriminator**: {user.discriminator} \n**ID**: {user.id}",
inline=False,
)
join_badges: str = "\u0020".join(badges) if badges else "N/A"
join_statuses = (
" \n| ".join(f"**{name}**: {value}" for name, value in statuses) if statuses else "**Status**: \nUnknown"
)
embed.add_field(
name="User Info 2:",
value=f"Type: {user_type} \nBadges: {join_badges} \n**Joined Discord**: {discord.utils.format_dt(user.created_at, style = "d")}\n{discord.utils.format_dt(user.created_at, style = "T")}\n {join_statuses}",
inline=False,
)
embed.add_field(
name="Guild Info:",
value=f"**Joined Guild**: {joined_guild} \n**Nickname**: {nickname} \n**Highest Role:** {highest_role}",
inline=False,
)
embed.set_image(url=user.display_avatar.url)
guilds_list = utils.grab_mutualguilds(ctx, user)
pag = commands.Paginator(prefix="", suffix="")
for g in guilds_list:
pag.add_line(f"{g}")
pages = pag.pages or ["None"]
if ctx.author.dm_channel is None:
await ctx.author.create_dm()
menu = utils.MutualGuildsEmbed(pages, ctx=ctx, disable_after=True)
view = utils.UserInfoSuper(ctx, menu, ctx.author.dm_channel)
await ctx.send(
"Pick a way for Mutual Guilds to be sent to you or not if you really don't the mutualguilds",
embed=embed,
view=view,
)
@commands.command(brief="uploads your emojis into a Senarc Bin link")
async def look_at(self, ctx):
if isinstance(ctx.message.channel, discord.TextChannel):
message_emojis = ""
for x in ctx.guild.emojis:
message_emojis = message_emojis + " " + str(x) + "\n"
paste = await utils.post(self.bot, message_emojis)
await ctx.send(paste)
if isinstance(ctx.channel, discord.DMChannel):
await ctx.send("We can't use that in DMS as it takes emoji regex and puts it into a paste.")
@commands.command(help="gives the id of the current guild or DM if you are in one.")
async def guild_get(self, ctx):
if isinstance(ctx.channel, discord.TextChannel):
await ctx.send(content=ctx.guild.id)
if isinstance(ctx.channel, discord.DMChannel):
await ctx.send(ctx.channel.id)
@commands.command(brief="a command to tell you the channel id", aliases=["GetChannelId"])
async def this(self, ctx):
await ctx.send(ctx.channel.id)
@commands.command(brief="Gives you mention info don't abuse(doesn't mention tho)")
async def mention(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
await ctx.send(
f"Discord Mention: {user.mention} \nRaw Mention: {discord.utils.escape_mentions(user.mention)}",
allowed_mentions=discord.AllowedMentions.none(),
)
@commands.cooldown(1, 30, BucketType.user)
@commands.command(help="fetch invite details")
async def fetch_invite(self, ctx, *invites: typing.Union[discord.Invite, str]):
if invites:
menu = utils.InviteInfoEmbed(invites, ctx=ctx, delete_after=True)
await menu.send()
if not invites:
await ctx.send("Please get actual invites to attempt grab")
ctx.command.reset_cooldown(ctx)
if len(invites) > 50:
await ctx.send(
"Reporting using more than 50 invites in this command. This is to prevent ratelimits with the api."
)
jdjg = await self.bot.try_user(168422909482762240)
await self.bot.get_channel(855217084710912050).send(
f"{jdjg.mention}.\n{ctx.author} causes a ratelimit issue with {len(invites)} invites"
)
@commands.command(brief="gives info about a file")
async def file(self, ctx):
if not ctx.message.attachments:
            await ctx.send("no file submitted")
if ctx.message.attachments:
embed = discord.Embed(title="Attachment info", color=random.randint(0, 16777215))
for a in ctx.message.attachments:
embed.add_field(name=f"ID: {a.id}", value=f"[{a.filename}]({a.url})")
embed.set_footer(text="Check on the url/urls to get a direct download to the url.")
await ctx.send(embed=embed, content="\nThat's good")
@commands.command(
brief="a command to get the avatar of a user",
help="using the userinfo technology it now powers avatar grabbing.",
aliases=["pfp", "av"],
)
async def avatar(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
embed = discord.Embed(color=random.randint(0, 16777215))
embed.set_author(name=f"{user.name}'s avatar:", icon_url=user.display_avatar.url)
embed.set_image(url=user.display_avatar.url)
embed.set_footer(text=f"Requested by {ctx.author}")
await ctx.send(embed=embed)
@commands.command(brief="this is a way to get the nearest channel.")
async def find_channel(self, ctx, *, args=None):
if args is None:
await ctx.send("Please specify a channel")
if args:
if isinstance(ctx.channel, discord.TextChannel):
channel = discord.utils.get(ctx.guild.channels, name=args)
if channel:
await ctx.send(channel.mention)
if channel is None:
await ctx.send("Unforantely we haven't found anything")
if isinstance(ctx.channel, discord.DMChannel):
await ctx.send("You can't use it in a DM.")
@commands.command(brief="a command to get the closest user.")
async def closest_user(self, ctx, *, args=None):
if args is None:
return await ctx.send("please specify a user")
if args and not self.bot.users:
return await ctx.send("There are no users cached :(")
if args:
userNearest = discord.utils.get(self.bot.users, name=args)
user_nick = discord.utils.get(self.bot.users, display_name=args)
if userNearest is None:
userNearest = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.name, args).ratio())[-1]
if user_nick is None:
user_nick = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.display_name, args).ratio())[
-1
]
if isinstance(ctx.channel, discord.TextChannel):
member_list = [x for x in ctx.guild.members if x.nick]
nearest_server_nick = sorted(member_list, key=lambda x: SequenceMatcher(None, x.nick, args).ratio())[-1]
if isinstance(ctx.channel, discord.DMChannel):
nearest_server_nick = "You unfortunately don't get the last value(a nickname) as it's a DM."
await ctx.send(f"Username : {userNearest} \nDisplay name : {user_nick} \nNickname: {nearest_server_nick}")
@commands.command(help="gives info on default emoji and custom emojis", name="emoji")
async def emoji_info(self, ctx, *emojis: typing.Union[utils.EmojiConverter, str]):
if emojis:
menu = utils.EmojiInfoEmbed(emojis, ctx=ctx, delete_after=True)
await menu.send()
if not emojis:
await ctx.send("Looks like there was no emojis.")
@commands.command(brief="gives info on emoji_id and emoji image.")
async def emoji_id(
self,
ctx,
*,
emoji: typing.Optional[typing.Union[discord.PartialEmoji, discord.Message, utils.EmojiBasic]] = None,
):
if isinstance(emoji, discord.Message):
emoji_message = emoji.content
emoji = None
with contextlib.suppress(commands.CommandError, commands.BadArgument):
emoji = await utils.EmojiBasic.convert(
ctx, emoji_message
) or await commands.PartialEmojiConverter().convert(ctx, emoji_message)
if emoji:
embed = discord.Embed(description=f" Emoji ID: {emoji.id}", color=random.randint(0, 16777215))
embed.set_image(url=emoji.url)
await ctx.send(embed=embed)
else:
await ctx.send("Not a valid emoji id.")
@commands.command()
async def fetch_content(self, ctx, *, args=None):
if args is None:
await ctx.send("please send actual text")
if args:
args = discord.utils.escape_mentions(args)
args = discord.utils.escape_markdown(args, as_needed=False, ignore_links=False)
            for x in ctx.message.mentions:
                args = args.replace(x.mention, f"\\{x.mention}")
            emojis = emoji.emoji_lis(args)
            emojis_return = [d["emoji"] for d in emojis]
            for x in emojis_return:
                args = args.replace(x, f"\\{x}")
            for x in re.findall(r":\w*:\d*", args):
                args = args.replace(x, f"\\{x}")
await ctx.send(f"{args}", allowed_mentions=discord.AllowedMentions.none())
@commands.command(brief="gives info about a role.", aliases=["roleinfo"])
async def role_info(self, ctx, *, role: typing.Optional[discord.Role] = None):
if role:
await utils.roleinfo(ctx, role)
if not role:
await ctx.send(f"The role you wanted was not found.")
class DevTools(commands.Cog):
"Helpful commands for developers in general"
def __init__(self, bot):
self.bot = bot
async def rtfm_lookup(self, url=None, *, args=None):
if not args:
return url
else:
res = await self.bot.session.get(
"https://repi.openrobot.xyz/search_docs",
params={"query": args, "documentation": url},
headers={"Authorization": os.environ["frostiweeb_api"]},
)
results = await res.json()
if not results:
return f"Could not find anything with {args}."
else:
return results
async def rtfm_send(self, ctx, results):
if isinstance(results, str):
await ctx.send(results, allowed_mentions=discord.AllowedMentions.none())
else:
embed = discord.Embed(color=random.randint(0, 16777215))
results = dict(itertools.islice(results.items(), 10))
embed.description = "\n".join(f"[`{result}`]({results.get(result)})" for result in results)
reference = utils.reference(ctx.message)
await ctx.send(embed=embed, reference=reference)
@commands.command(
aliases=["rtd", "rtfs", "rtdm"],
invoke_without_command=True,
brief="a rtfm command that allows you to lookup at any library we support looking up(using selects)",
)
async def rtfm(self, ctx, *, args=None):
rtfm_dictionary = await self.bot.db.fetch("SELECT * FROM RTFM_DICTIONARY")
view = utils.RtfmChoice(ctx, rtfm_dictionary, timeout=15.0)
await ctx.send(content="Please Pick a library you want to parse", view=view)
await view.wait()
await ctx.trigger_typing()
results = await self.rtfm_lookup(url=view.value, args=args)
await self.rtfm_send(ctx, results)
def charinfo_converter(self, string):
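# Build the zero-padded \UXXXXXXXX escape for the character and link to its fileformat.info page.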
digit = f"{ord(string):x}"
name = unicodedata.name(string, "The unicode was not found")
return f"`\\U{digit:>08}`: {name} - {string} \N{EM DASH} <http://www.fileformat.info/info/unicode/char/{digit}>"
@commands.command(brief="Gives you data about charinfo (based on R.danny's command)")
async def charinfo(self, ctx, *, args=None):
if not args:
return await ctx.send("That doesn't help out all :(")
values = "\n".join(map(self.charinfo_converter, set(args)))
content = textwrap.wrap(values, width=2000)
menu = utils.charinfoMenu(content, ctx=ctx, delete_after=True)
await menu.send()
@commands.command(brief="a command to view the rtfm DB")
async def rtfm_view(self, ctx):
rtfm_dictionary = dict(await self.bot.db.fetch("SELECT * FROM RTFM_DICTIONARY"))
pag = commands.Paginator(prefix="", suffix="")
for g in rtfm_dictionary:
pag.add_line(f"{g} : {rtfm_dictionary.get(g)}")
menu = utils.RtfmEmbed(pag.pages, ctx=ctx, delete_after=True)
await menu.send()
@commands.command(brief="a command to autoformat your python code to pep8")
async def pep8(self, ctx):
modal = utils.CodeBlockView(ctx, timeout=180.0)
message = await ctx.send(
"Please Submit the Code Block\nDo you want to use black's line formatter at 120 (i.e. black - l120 .), or just use the default? (i.e black .):",
view=modal,
)
await modal.wait()
if not modal.value:
return await ctx.reply("You need to give it code to work with it.", mention_author=False)
code = codeblock_converter(argument=f"{modal.value}")
if not modal.value2:
await message.edit("Default it is.", view=None)
if modal.value2 is True:
await message.edit("Special formatting at line length 120 it is.", view=None)
code_conversion = functools.partial(utils.formatter, code.content, bool(modal.value2))
try:
code = await self.bot.loop.run_in_executor(None, code_conversion)
except Exception as e:
return await message.edit(f"Error Ocurred with {e}")
embed = discord.Embed(
title="Reformatted with Black",
description=f"code returned: \n```python\n{code}```",
color=random.randint(0, 16777215),
)
embed.set_footer(text="Make sure you use python code, otherwise it will not work properly.")
await message.edit(embed=embed)
@commands.command(brief="grabs your pfp's image")
async def pfp_grab(self, ctx):
if_animated = ctx.author.display_avatar.is_animated()
save_type = ".gif" if if_animated else ".png"
icon_file = await ctx.author.display_avatar.read()
buffer = io.BytesIO(icon_file)
buffer.seek(0)
# print(len(buffer.getvalue()))
file = discord.File(buffer, filename=f"pfp{save_type}")
try:
await ctx.send(content="here's your avatar:", file=file)
except discord.HTTPException:
await ctx.send("it looks like it couldn't send the pfp due to the file size.")
@commands.command(brief="Gives info on pypi packages")
async def pypi(self, ctx, *, args=None):
# https://pypi.org/simple/
if args:
pypi_response = await self.bot.session.get(f"https://pypi.org/pypi/{args}/json")
if pypi_response.ok:
pypi_response = await pypi_response.json()
pypi_data = pypi_response["info"]
embed = discord.Embed(
title=f"{pypi_data.get("name") or "None provided"} {pypi_data.get("version") or "None provided"}",
url=f"{pypi_data.get("release_url") or "None provided"}",
description=f"{pypi_data.get("summary") or "None provided"}",
color=random.randint(0, 16777215),
)
embed.set_thumbnail(url="https://i.imgur.com/oP0e7jK.png")
embed.add_field(
name="**Author Info**",
value=f"**Author Name:** {pypi_data.get("author") or "None provided"}\n**Author Email:** {pypi_data.get("author_email") or "None provided"}",
inline=False,
)
embed.add_field(
name="**Package Info**",
value=f"**Download URL**: {pypi_data.get("download_url") or "None provided"}\n**Documentation URL:** {pypi_data.get("docs_url") or "None provided"}\n**Home Page:** {pypi_data.get("home_page") or "None provided"}\n**Keywords:** {pypi_data.get("keywords") or "None provided"}\n**License:** {pypi_data.get("license") or "None provided"}",
inline=False,
)
await ctx.send(embed=embed)
else:
await ctx.send(
f"Could not find package **{args}** on pypi.", allowed_mentions=discord.AllowedMentions.none()
)
else:
await ctx.send("Please look for a library to get the info of.")
@commands.command(brief="make a quick bot invite with 0 perms")
async def invite_bot(self, ctx, *, user: typing.Optional[discord.User] = None):
user = user or ctx.author
if not user.bot:
return await ctx.send("That's not a legit bot")
invite = discord.utils.oauth_url(client_id=user.id, scopes=("bot",))
slash_invite = discord.utils.oauth_url(client_id=user.id)
view = discord.ui.View()
view.add_item(
discord.ui.Button(label=f"{user.name}'s Normal Invite", url=invite, style=discord.ButtonStyle.link)
)
view.add_item(
discord.ui.Button(
label=f"{user.name}'s Invite With Slash Commands", url=slash_invite, style=discord.ButtonStyle.link
)
)
await ctx.send(f"Invite with slash commands and the bot scope or only with a bot scope:", view=view)
@commands.command(brief="gets you a guild's icon", aliases=["guild_icon"])
async def server_icon(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
guild = guild or ctx.guild
if not guild:
return await ctx.send("no guild to get the icon of.")
await ctx.send(f"{guild.icon.url if guild.icon else "No Url for This Guild, I am sorry dude :("}")
@commands.command(brief="some old fooz command..")
async def fooz(self, ctx, *, args=None):
if not args:
await ctx.send("success")
if args:
await ctx.send("didn't use it properly :(")
@commands.command(brief="puts the message time as a timestamp")
async def message_time(self, ctx):
embed = discord.Embed(title="Message Time", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)
embed.set_footer(text=f"{ctx.message.id}")
await ctx.send(content=f"Only here cause JDJG Bot has it and why not have it here now.", embed=embed)
@commands.command(brief="converts info about colors for you.", invoke_without_command=True)
async def color(self, ctx, *, color: utils.ColorConverter = None):
if not color:
return await ctx.send("you need to give me a color to use.")
await ctx.send(f"Hexadecimal: {color} \nValue : {color.value} \nRGB: {color.to_rgb()}")
@commands.command(brief="a command that tells a user creation time.")
async def created_at(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
creation_info = f"{discord.utils.format_dt(user.created_at, style = "d")}\n{discord.utils.format_dt(user.created_at, style = "T")}"
await ctx.send(
f"\nName : {user}\nMention : {user.mention} was created:\n{creation_info}\nRaw Version: ```{creation_info}```",
allowed_mentions=discord.AllowedMentions.none(),
)
@commands.command(brief="a command that makes a fake user id based on the current time.")
async def fake_user_id(self, ctx):
await ctx.send(f"User id: {utils.generate_snowflake()}")
@commands.command(brief="gives information on snowflakes")
async def snowflake_info(self, ctx, *, snowflake: typing.Optional[utils.ObjectPlus] = None):
if not snowflake:
await ctx.send(
"you either returned nothing or an invalid snowflake now going to the current time for information."
)
# Change ObjectPlusConverter back to discord.Object (same with utils.ObjectPlus) if discord.py merges my pull request into master.
generated_time = await utils.ObjectPlusConverter().convert(ctx, argument=f"{int(utils.generate_snowflake())}")
snowflake = snowflake or generated_time
embed = discord.Embed(title="❄️ SnowFlake Info:", color=5793266)
embed.add_field(
name="Created At:",
value=f"{discord.utils.format_dt(snowflake.created_at, style = "d")}\n{discord.utils.format_dt(snowflake.created_at, style = "T")}",
)
embed.add_field(name="Worker ID:", value=f"{snowflake.worker_id}")
embed.add_field(name="Process ID:", value=f"{snowflake.process_id}")
embed.add_field(name="Increment:", value=f"{snowflake.increment_id}")
embed.set_footer(text=f"Snowflake ID: {snowflake.id}")
await ctx.send(embed=embed)
@commands.command(brief="Generates a fake token from the current time")
async def fake_token(self, ctx):
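# Discord-style tokens are, roughly, three dot-separated parts: base64(user id), base64(timestamp bytes), and a random tail; the code below imitates that shape with freshly generated data.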
snowflake_object = discord.Object(utils.generate_snowflake())
first_encoded = base64.b64encode(f"{snowflake_object.id}".encode())
first_bit = first_encoded.decode()
timestamp = int(snowflake_object.created_at.timestamp() - 129384000)
d = timestamp.to_bytes(4, "big")
second_bit_encoded = base64.standard_b64encode(d)
second_bit = second_bit_encoded.decode().rstrip("=")
last_bit = secrets.token_urlsafe(20)
embed = discord.Embed(
title="Newly Generated Fake Token",
description=f"ID: ``{snowflake_object.id}``\nCreated at : \n{discord.utils.format_dt(snowflake_object.created_at, style='d')}\n{discord.utils.format_dt(snowflake_object.created_at, style='T')}",
)
embed.add_field(name="Generated Token:", value=f"``{first_bit}.{second_bit}.{last_bit}``")
embed.set_thumbnail(url=ctx.author.display_avatar.url)
embed.set_footer(text=f"Requested by {ctx.author}")
await ctx.send("We generated a fake token :clap::", embed=embed)
@commands.cooldown(1, 60, BucketType.user)
@commands.command(brief="makes a request to add a bot to the test guild")
async def addbot(self, ctx, *, user: typing.Optional[discord.User] = None):
user = user or ctx.author
if not user.bot:
ctx.command.reset_cooldown(ctx)
return await ctx.send("Please Use A **Bot** ID, not a **User** ID.")
modal = utils.AddBotView(ctx, timeout=180.0)
message = await ctx.send("Please Tell us the reason you want to add your bot to the Test Guild:", view=modal)
await modal.wait()
if modal.value is None:
ctx.command.reset_cooldown(ctx)
return await message.edit("Provide a reason why you want your bot added to your guild")
guild = self.bot.get_guild(438848185008390158)
member = await self.bot.try_member(guild, ctx.author.id)
if member is None:
view = discord.ui.View()
view.add_item(
discord.ui.Button(
label=f"Test Guild Invite",
url="https://discord.gg/hKn8qgCDzK",
style=discord.ButtonStyle.link,
row=1,
)
)
return await message.edit(
"Make sure to join the guild linked soon... then rerun the command. If you are in the guild contact the owner(the owner is listed in the owner command)",
view=view,
)
embed = discord.Embed(
title="Bot Request",
colour=discord.Colour.blurple(),
description=f"reason: \n{modal.value}\n\n[Invite URL]({discord.utils.oauth_url(client_id = user.id, scopes=("bot",))})",
timestamp=ctx.message.created_at,
)
embed.add_field(name="Author", value=f"{ctx.author} (ID: {ctx.author.id})", inline=False)
embed.add_field(name="Bot", value=f"{user} (ID: {user.id})", inline=False)
embed.set_footer(text=ctx.author.id)
embed.set_author(name=user.id, icon_url=user.display_avatar.with_format("png"))
jdjg = self.bot.get_user(168422909482762240)
benitz = self.bot.get_user(529499034495483926)
await self.bot.get_channel(816807453215424573).send(content=f"{jdjg.mention} {benitz.mention}", embed=embed)
await ctx.reply(
f"It appears adding your bot worked. \nIf you leave your bot will be kicked, unless you have an alt there, a friend, etc. \n(It will be kicked to prevent raiding and taking up guild space if you leave). \nYour bot will be checked out. {jdjg} will then determine if your bot is good to add to the guild. Make sure to open your Dms to JDJG, so he can dm you about the bot being added. \nIf you don't add him, your bot will be denied."
)
@commands.command(
brief="a command that takes a url and sees if it's an image (requires embed permissions at the moment)."
)
async def image_check(self, ctx):
await ctx.send(
"Please wait for discord to edit your message, if it does error about not a valid image, please send a screenshot of your usage and the bot's message."
)
await asyncio.sleep(5)
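# Discord attaches link embeds asynchronously; only embeds whose type resolved to "image" count as valid images.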
images = list(filter(lambda e: e.type == "image", ctx.message.embeds))
if not images or not ctx.message.embeds:
return await ctx.send(
"you need to pass a url with an image, if you did, then please run again. This is a discord issue, and I do not want to wait for discord to change its message."
)
await ctx.send(f"You have {len(images)} / {len(ctx.message.embeds)} links that are valid images.")
@commands.command(brief="Gives info on npm packages")
async def npm(self, ctx, *, args=None):
if args:
npm_response = await self.bot.session.get(f"https://registry.npmjs.com/{args}")
if npm_response.ok:
npm_response = await npm_response.json()
data = utils.get_required_npm(npm_response)
await ctx.send(embed=utils.npm_create_embed(data))
else:
await ctx.send(
f"Could not find package **{args}** on npm.", allowed_mentions=discord.AllowedMentions.none()
)
else:
await ctx.send("Please look for a library to get the info of.")
@commands.cooldown(1, 30, BucketType.user)
@commands.command(
brief="runs some code in a sandbox(based on Soos's Run command)", aliases=["eval", "run", "sandbox"]
)
async def console(self, ctx, *, code: codeblock_converter = None):
if not code:
return await ctx.send("You need to give me some code to use, otherwise I can not determine what it is.")
if not code.language:
return await ctx.send("You Must provide a language to use")
if not code.content:
return await ctx.send("No code provided")
tio = await async_tio.Tio(session=self.bot.session)
output = await tio.execute(f"{code.content}", language=f"{code.language}")
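# Keep output under 200 characters inline in a code block; anything longer is pasted externally via utils.post.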
text_returned = (
f"```{code.language}\n{output}```"
if len(f"{output}") < 200
else await utils.post(self.bot, code=f"{output}")
)
embed = discord.Embed(
title=f"Your code exited with code {output.exit_status}", description=f"{text_returned}", color=242424
)
embed.set_author(name=f"{ctx.author}", icon_url=ctx.author.display_avatar.url)
embed.set_footer(text="Powered by Tio.run")
await ctx.send(content="I executed your code in a sandbox", embed=embed)
async def setup(bot):
await bot.add_cog(Info(bot))
await bot.add_cog(DevTools(bot))
| from discord.ext import commands
import re
import discord
import random
import typing
import emoji
import unicodedata
import textwrap
import contextlib
import io
import asyncio
import async_tio
import itertools
import os
import base64
import secrets
import utils
from difflib import SequenceMatcher
from discord.ext.commands.cooldowns import BucketType
from jishaku.codeblocks import codeblock_converter
import functools
class Info(commands.Cog):
"Gives you Information about data you are allowed to access"
def __init__(self, bot):
self.bot = bot
@commands.command(
help="gives you info about a guild",
aliases=[
"server_info",
"guild_fetch",
"guild_info",
"fetch_guild",
"guildinfo",
],
)
async def serverinfo(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
guild = guild or ctx.guild
if guild is None:
await ctx.send("Guild wanted has not been found")
if guild:
await utils.guildinfo(ctx, guild)
@commands.command(
aliases=["user_info", "user-info", "ui", "whois"],
brief="a command that gives information on users",
help="this can work with mentions, ids, usernames, and even full names.",
)
async def userinfo(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
user_type = "Bot" if user.bot else "User" if isinstance(user, discord.User) else "Member"
statuses = []
badges = [utils.profile_converter("badges", f) for f in user.public_flags.all()] if user.public_flags else []
if user.bot:
badges.append(utils.profile_converter("badges", "bot"))
if user.system:
badges.append(utils.profile_converter("badges", "system"))
if isinstance(user, discord.Member):
nickname = user.nick
joined_guild = f"{discord.utils.format_dt(user.joined_at, style = 'd')}\n{discord.utils.format_dt(user.joined_at, style = 'T')}"
highest_role = user.top_role
for name, status in (
("Status", user.status),
("Desktop", user.desktop_status),
("Mobile", user.mobile_status),
("Web", user.web_status),
):
statuses.append((name, utils.profile_converter(name.lower(), status)))
else:
nickname = "None Found"
joined_guild = "N/A"
highest_role = "None Found"
member = discord.utils.find(lambda member: member.id == user.id, self.bot.get_all_members())
if member:
for name, status in (
("Status", member.status),
("Desktop", member.desktop_status),
("Mobile", member.mobile_status),
("Web", member.web_status),
):
statuses.append((name, utils.profile_converter(name.lower(), status)))
embed = discord.Embed(title=f"{user}", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)
embed.add_field(
name="User Info: ",
value=f"**Username**: {user.name} \n**Discriminator**: {user.discriminator} \n**ID**: {user.id}",
inline=False,
)
join_badges: str = "\u0020".join(badges) if badges else "N/A"
join_statuses = (
" \n| ".join(f"**{name}**: {value}" for name, value in statuses) if statuses else "**Status**: \nUnknown"
)
embed.add_field(
name="User Info 2:",
value=f"Type: {user_type} \nBadges: {join_badges} \n**Joined Discord**: {discord.utils.format_dt(user.created_at, style = 'd')}\n{discord.utils.format_dt(user.created_at, style = 'T')}\n {join_statuses}",
inline=False,
)
embed.add_field(
name="Guild Info:",
value=f"**Joined Guild**: {joined_guild} \n**Nickname**: {nickname} \n**Highest Role:** {highest_role}",
inline=False,
)
embed.set_image(url=user.display_avatar.url)
guilds_list = utils.grab_mutualguilds(ctx, user)
pag = commands.Paginator(prefix="", suffix="")
for g in guilds_list:
pag.add_line(f"{g}")
pages = pag.pages or ["None"]
if ctx.author.dm_channel is None:
await ctx.author.create_dm()
menu = utils.MutualGuildsEmbed(pages, ctx=ctx, disable_after=True)
view = utils.UserInfoSuper(ctx, menu, ctx.author.dm_channel)
await ctx.send(
"Pick a way for Mutual Guilds to be sent to you or not if you really don't the mutualguilds",
embed=embed,
view=view,
)
@commands.command(brief="uploads your emojis into a Senarc Bin link")
async def look_at(self, ctx):
if isinstance(ctx.message.channel, discord.TextChannel):
message_emojis = ""
for x in ctx.guild.emojis:
message_emojis = message_emojis + " " + str(x) + "\n"
paste = await utils.post(self.bot, message_emojis)
await ctx.send(paste)
if isinstance(ctx.channel, discord.DMChannel):
await ctx.send("We can't use that in DMS as it takes emoji regex and puts it into a paste.")
@commands.command(help="gives the id of the current guild or DM if you are in one.")
async def guild_get(self, ctx):
if isinstance(ctx.channel, discord.TextChannel):
await ctx.send(content=ctx.guild.id)
if isinstance(ctx.channel, discord.DMChannel):
await ctx.send(ctx.channel.id)
@commands.command(brief="a command to tell you the channel id", aliases=["GetChannelId"])
async def this(self, ctx):
await ctx.send(ctx.channel.id)
@commands.command(brief="Gives you mention info don't abuse(doesn't mention tho)")
async def mention(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
await ctx.send(
f"Discord Mention: {user.mention} \nRaw Mention: {discord.utils.escape_mentions(user.mention)}",
allowed_mentions=discord.AllowedMentions.none(),
)
@commands.cooldown(1, 30, BucketType.user)
@commands.command(help="fetch invite details")
async def fetch_invite(self, ctx, *invites: typing.Union[discord.Invite, str]):
if invites:
menu = utils.InviteInfoEmbed(invites, ctx=ctx, delete_after=True)
await menu.send()
if not invites:
await ctx.send("Please get actual invites to attempt grab")
ctx.command.reset_cooldown(ctx)
if len(invites) > 50:
await ctx.send(
"Reporting using more than 50 invites in this command. This is to prevent ratelimits with the api."
)
jdjg = await self.bot.try_user(168422909482762240)
await self.bot.get_channel(855217084710912050).send(
f"{jdjg.mention}.\n{ctx.author} causes a ratelimit issue with {len(invites)} invites"
)
@commands.command(brief="gives info about a file")
async def file(self, ctx):
if not ctx.message.attachments:
await ctx.send("no file submitted")
if ctx.message.attachments:
embed = discord.Embed(title="Attachment info", color=random.randint(0, 16777215))
for a in ctx.message.attachments:
embed.add_field(name=f"ID: {a.id}", value=f"[{a.filename}]({a.url})")
embed.set_footer(text="Check on the url/urls to get a direct download to the url.")
await ctx.send(embed=embed, content="\nThat's good")
@commands.command(
brief="a command to get the avatar of a user",
help="using the userinfo technology it now powers avatar grabbing.",
aliases=["pfp", "av"],
)
async def avatar(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
embed = discord.Embed(color=random.randint(0, 16777215))
embed.set_author(name=f"{user.name}'s avatar:", icon_url=user.display_avatar.url)
embed.set_image(url=user.display_avatar.url)
embed.set_footer(text=f"Requested by {ctx.author}")
await ctx.send(embed=embed)
@commands.command(brief="this is a way to get the nearest channel.")
async def find_channel(self, ctx, *, args=None):
if args is None:
await ctx.send("Please specify a channel")
if args:
if isinstance(ctx.channel, discord.TextChannel):
channel = discord.utils.get(ctx.guild.channels, name=args)
if channel:
await ctx.send(channel.mention)
if channel is None:
await ctx.send("Unforantely we haven't found anything")
if isinstance(ctx.channel, discord.DMChannel):
await ctx.send("You can't use it in a DM.")
@commands.command(brief="a command to get the closest user.")
async def closest_user(self, ctx, *, args=None):
if args is None:
return await ctx.send("please specify a user")
if args and not self.bot.users:
return await ctx.send("There are no users cached :(")
if args:
userNearest = discord.utils.get(self.bot.users, name=args)
user_nick = discord.utils.get(self.bot.users, display_name=args)
if userNearest is None:
userNearest = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.name, args).ratio())[-1]
if user_nick is None:
user_nick = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.display_name, args).ratio())[
-1
]
if isinstance(ctx.channel, discord.TextChannel):
member_list = [x for x in ctx.guild.members if x.nick]
nearest_server_nick = sorted(member_list, key=lambda x: SequenceMatcher(None, x.nick, args).ratio())[-1]
if isinstance(ctx.channel, discord.DMChannel):
nearest_server_nick = "You unfortunately don't get the last value(a nickname) as it's a DM."
await ctx.send(f"Username : {userNearest} \nDisplay name : {user_nick} \nNickname: {nearest_server_nick}")
@commands.command(help="gives info on default emoji and custom emojis", name="emoji")
async def emoji_info(self, ctx, *emojis: typing.Union[utils.EmojiConverter, str]):
if emojis:
menu = utils.EmojiInfoEmbed(emojis, ctx=ctx, delete_after=True)
await menu.send()
if not emojis:
await ctx.send("Looks like there was no emojis.")
@commands.command(brief="gives info on emoji_id and emoji image.")
async def emoji_id(
self,
ctx,
*,
emoji: typing.Optional[typing.Union[discord.PartialEmoji, discord.Message, utils.EmojiBasic]] = None,
):
if isinstance(emoji, discord.Message):
emoji_message = emoji.content
emoji = None
with contextlib.suppress(commands.CommandError, commands.BadArgument):
emoji = await utils.EmojiBasic.convert(
ctx, emoji_message
) or await commands.PartialEmojiConverter().convert(ctx, emoji_message)
if emoji:
embed = discord.Embed(description=f" Emoji ID: {emoji.id}", color=random.randint(0, 16777215))
embed.set_image(url=emoji.url)
await ctx.send(embed=embed)
else:
await ctx.send("Not a valid emoji id.")
@commands.command()
async def fetch_content(self, ctx, *, args=None):
if args is None:
await ctx.send("please send actual text")
if args:
args = discord.utils.escape_mentions(args)
args = discord.utils.escape_markdown(args, as_needed=False, ignore_links=False)
for x in ctx.message.mentions:
args = args.replace(x.mention, f"\\{x.mention}")
emojis = emoji.emoji_lis(args)
emojis_return = [d["emoji"] for d in emojis]
for x in emojis_return:
args = args.replace(x, f"\{x}")
for x in re.findall(r":\w*:\d*", args):
args = args.replace(x, f"\{x}")
await ctx.send(f"{args}", allowed_mentions=discord.AllowedMentions.none())
@commands.command(brief="gives info about a role.", aliases=["roleinfo"])
async def role_info(self, ctx, *, role: typing.Optional[discord.Role] = None):
if role:
await utils.roleinfo(ctx, role)
if not role:
await ctx.send(f"The role you wanted was not found.")
class DevTools(commands.Cog):
"Helpful commands for developers in general"
def __init__(self, bot):
self.bot = bot
async def rtfm_lookup(self, url=None, *, args=None):
if not args:
return url
else:
res = await self.bot.session.get(
"https://repi.openrobot.xyz/search_docs",
params={"query": args, "documentation": url},
headers={"Authorization": os.environ["frostiweeb_api"]},
)
results = await res.json()
if not results:
return f"Could not find anything with {args}."
else:
return results
async def rtfm_send(self, ctx, results):
if isinstance(results, str):
await ctx.send(results, allowed_mentions=discord.AllowedMentions.none())
else:
embed = discord.Embed(color=random.randint(0, 16777215))
results = dict(itertools.islice(results.items(), 10))
embed.description = "\n".join(f"[`{result}`]({results.get(result)})" for result in results)
reference = utils.reference(ctx.message)
await ctx.send(embed=embed, reference=reference)
@commands.command(
aliases=["rtd", "rtfs", "rtdm"],
invoke_without_command=True,
brief="a rtfm command that allows you to lookup at any library we support looking up(using selects)",
)
async def rtfm(self, ctx, *, args=None):
rtfm_dictionary = await self.bot.db.fetch("SELECT * FROM RTFM_DICTIONARY")
view = utils.RtfmChoice(ctx, rtfm_dictionary, timeout=15.0)
await ctx.send(content="Please Pick a library you want to parse", view=view)
await view.wait()
await ctx.trigger_typing()
results = await self.rtfm_lookup(url=view.value, args=args)
await self.rtfm_send(ctx, results)
def charinfo_converter(self, string):
digit = f"{ord(string):x}"
name = unicodedata.name(string, "The unicode was not found")
return f"`\\U{digit:>08}`: {name} - {string} \N{EM DASH} <http://www.fileformat.info/info/unicode/char/{digit}>"
@commands.command(brief="Gives you data about charinfo (based on R.danny's command)")
async def charinfo(self, ctx, *, args=None):
if not args:
return await ctx.send("That doesn't help out all :(")
values = "\n".join(map(self.charinfo_converter, set(args)))
content = textwrap.wrap(values, width=2000)
menu = utils.charinfoMenu(content, ctx=ctx, delete_after=True)
await menu.send()
@commands.command(brief="a command to view the rtfm DB")
async def rtfm_view(self, ctx):
rtfm_dictionary = dict(await self.bot.db.fetch("SELECT * FROM RTFM_DICTIONARY"))
pag = commands.Paginator(prefix="", suffix="")
for g in rtfm_dictionary:
pag.add_line(f"{g} : {rtfm_dictionary.get(g)}")
menu = utils.RtfmEmbed(pag.pages, ctx=ctx, delete_after=True)
await menu.send()
@commands.command(brief="a command to autoformat your python code to pep8")
async def pep8(self, ctx):
modal = utils.CodeBlockView(ctx, timeout=180.0)
message = await ctx.send(
"Please Submit the Code Block\nDo you want to use black's line formatter at 120 (i.e. black - l120 .), or just use the default? (i.e black .):",
view=modal,
)
await modal.wait()
if not modal.value:
return await ctx.reply("You need to give it code to work with it.", mention_author=False)
code = codeblock_converter(argument=f"{modal.value}")
if not modal.value2:
await message.edit("Default it is.", view=None)
if modal.value2 is True:
await message.edit("Special formatting at line length 120 it is.", view=None)
code_conversion = functools.partial(utils.formatter, code.content, bool(modal.value2))
try:
code = await self.bot.loop.run_in_executor(None, code_conversion)
except Exception as e:
return await message.edit(f"Error Ocurred with {e}")
embed = discord.Embed(
title="Reformatted with Black",
description=f"code returned: \n```python\n{code}```",
color=random.randint(0, 16777215),
)
embed.set_footer(text="Make sure you use python code, otherwise it will not work properly.")
await message.edit(embed=embed)
@commands.command(brief="grabs your pfp's image")
async def pfp_grab(self, ctx):
if_animated = ctx.author.display_avatar.is_animated()
save_type = ".gif" if if_animated else ".png"
icon_file = await ctx.author.display_avatar.read()
buffer = io.BytesIO(icon_file)
buffer.seek(0)
# print(len(buffer.getvalue()))
file = discord.File(buffer, filename=f"pfp{save_type}")
try:
await ctx.send(content="here's your avatar:", file=file)
except discord.HTTPException:
await ctx.send("it looks like it couldn't send the pfp due to the file size.")
@commands.command(brief="Gives info on pypi packages")
async def pypi(self, ctx, *, args=None):
# https://pypi.org/simple/
if args:
pypi_response = await self.bot.session.get(f"https://pypi.org/pypi/{args}/json")
if pypi_response.ok:
pypi_response = await pypi_response.json()
pypi_data = pypi_response["info"]
embed = discord.Embed(
title=f"{pypi_data.get('name') or 'None provided'} {pypi_data.get('version') or 'None provided'}",
url=f"{pypi_data.get('release_url') or 'None provided'}",
description=f"{pypi_data.get('summary') or 'None provided'}",
color=random.randint(0, 16777215),
)
embed.set_thumbnail(url="https://i.imgur.com/oP0e7jK.png")
embed.add_field(
name="**Author Info**",
value=f"**Author Name:** {pypi_data.get('author') or 'None provided'}\n**Author Email:** {pypi_data.get('author_email') or 'None provided'}",
inline=False,
)
embed.add_field(
name="**Package Info**",
value=f"**Download URL**: {pypi_data.get('download_url') or 'None provided'}\n**Documentation URL:** {pypi_data.get('docs_url') or 'None provided'}\n**Home Page:** {pypi_data.get('home_page') or 'None provided'}\n**Keywords:** {pypi_data.get('keywords') or 'None provided'}\n**License:** {pypi_data.get('license') or 'None provided'}",
inline=False,
)
await ctx.send(embed=embed)
else:
await ctx.send(
f"Could not find package **{args}** on pypi.", allowed_mentions=discord.AllowedMentions.none()
)
else:
await ctx.send("Please look for a library to get the info of.")
@commands.command(brief="make a quick bot invite with 0 perms")
async def invite_bot(self, ctx, *, user: typing.Optional[discord.User] = None):
user = user or ctx.author
if not user.bot:
return await ctx.send("That's not a legit bot")
invite = discord.utils.oauth_url(client_id=user.id, scopes=("bot",))
slash_invite = discord.utils.oauth_url(client_id=user.id)
view = discord.ui.View()
view.add_item(
discord.ui.Button(label=f"{user.name}'s Normal Invite", url=invite, style=discord.ButtonStyle.link)
)
view.add_item(
discord.ui.Button(
label=f"{user.name}'s Invite With Slash Commands", url=slash_invite, style=discord.ButtonStyle.link
)
)
await ctx.send(f"Invite with slash commands and the bot scope or only with a bot scope:", view=view)
@commands.command(brief="gets you a guild's icon", aliases=["guild_icon"])
async def server_icon(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
guild = guild or ctx.guild
if not guild:
return await ctx.send("no guild to get the icon of.")
await ctx.send(f"{guild.icon.url if guild.icon else 'No Url for This Guild, I am sorry dude :('}")
@commands.command(brief="some old fooz command..")
async def fooz(self, ctx, *, args=None):
if not args:
await ctx.send("success")
if args:
await ctx.send("didn't use it properly :(")
@commands.command(brief="puts the message time as a timestamp")
async def message_time(self, ctx):
embed = discord.Embed(title="Message Time", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)
embed.set_footer(text=f"{ctx.message.id}")
await ctx.send(content=f"Only here cause JDJG Bot has it and why not have it here now.", embed=embed)
@commands.command(brief="converts info about colors for you.", invoke_without_command=True)
async def color(self, ctx, *, color: utils.ColorConverter = None):
if not color:
return await ctx.send("you need to give me a color to use.")
await ctx.send(f"Hexadecimal: {color} \nValue : {color.value} \nRGB: {color.to_rgb()}")
@commands.command(brief="a command that tells a user creation time.")
async def created_at(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
creation_info = f"{discord.utils.format_dt(user.created_at, style = 'd')}\n{discord.utils.format_dt(user.created_at, style = 'T')}"
await ctx.send(
f"\nName : {user}\nMention : {user.mention} was created:\n{creation_info}\nRaw Version: ```{creation_info}```",
allowed_mentions=discord.AllowedMentions.none(),
)
@commands.command(brief="a command that makes a fake user id based on the current time.")
async def fake_user_id(self, ctx):
await ctx.send(f"User id: {utils.generate_snowflake()}")
@commands.command(brief="gives information on snowflakes")
async def snowflake_info(self, ctx, *, snowflake: typing.Optional[utils.ObjectPlus] = None):
if not snowflake:
await ctx.send(
"you either returned nothing or an invalid snowflake now going to the current time for information."
)
# Change ObjectPlusConverter back to discord.Object (same with utils.ObjectPlus) if discord.py merges my pull request into master.
generated_time = await utils.ObjectPlusConverter().convert(ctx, argument=f"{int(utils.generate_snowflake())}")
snowflake = snowflake or generated_time
embed = discord.Embed(title="❄️ SnowFlake Info:", color=5793266)
embed.add_field(
name="Created At:",
value=f"{discord.utils.format_dt(snowflake.created_at, style = 'd')}\n{discord.utils.format_dt(snowflake.created_at, style = 'T')}",
)
embed.add_field(name="Worker ID:", value=f"{snowflake.worker_id}")
embed.add_field(name="Process ID:", value=f"{snowflake.process_id}")
embed.add_field(name="Increment:", value=f"{snowflake.increment_id}")
embed.set_footer(text=f"Snowflake ID: {snowflake.id}")
await ctx.send(embed=embed)
@commands.command(brief="Generates a fake token from the current time")
async def fake_token(self, ctx):
object = discord.Object(utils.generate_snowflake())
first_encoded = base64.b64encode(f"{object.id}".encode())
first_bit = first_encoded.decode()
timestamp = int(object.created_at.timestamp() - 129384000)
d = timestamp.to_bytes(4, "big")
second_bit_encoded = base64.standard_b64encode(d)
second_bit = second_bit_encoded.decode().rstrip("==")
last_bit = secrets.token_urlsafe(20)
embed = discord.Embed(
title=f"Newly Generated Fake Token",
description=f"ID: ``{object.id}``\nCreated at : \n{discord.utils.format_dt(object.created_at, style = 'd')}\n{discord.utils.format_dt(object.created_at, style = 'T')}",
)
embed.add_field(name="Generated Token:", value=f"``{first_bit}.{second_bit}.{last_bit}``")
embed.set_thumbnail(url=ctx.author.display_avatar.url)
embed.set_footer(text=f"Requested by {ctx.author}")
await ctx.send("We generated a fake token :clap::", embed=embed)
@commands.cooldown(1, 60, BucketType.user)
@commands.command(brief="makes a request to add a bot to the test guild")
async def addbot(self, ctx, *, user: typing.Optional[discord.User] = None):
user = user or ctx.author
if not user.bot:
ctx.command.reset_cooldown(ctx)
return await ctx.send("Please Use A **Bot** ID, not a **User** ID.")
modal = utils.AddBotView(ctx, timeout=180.0)
message = await ctx.send("Please Tell us the reason you want to add your bot to the Test Guild:", view=modal)
await modal.wait()
if modal.value is None:
ctx.command.reset_cooldown(ctx)
return await message.edit("Provide a reason why you want your bot added to your guild")
guild = self.bot.get_guild(438848185008390158)
member = await self.bot.try_member(guild, ctx.author.id)
if member is None:
view = discord.ui.View()
view.add_item(
discord.ui.Button(
label=f"Test Guild Invite",
url="https://discord.gg/hKn8qgCDzK",
style=discord.ButtonStyle.link,
row=1,
)
)
return await message.edit(
"Make sure to join the guild linked soon... then rerun the command. If you are in the guild contact the owner(the owner is listed in the owner command)",
view=view,
)
embed = discord.Embed(
title="Bot Request",
colour=discord.Colour.blurple(),
description=f"reason: \n{modal.value}\n\n[Invite URL]({discord.utils.oauth_url(client_id = user.id, scopes=('bot',))})",
timestamp=ctx.message.created_at,
)
embed.add_field(name="Author", value=f"{ctx.author} (ID: {ctx.author.id})", inline=False)
embed.add_field(name="Bot", value=f"{user} (ID: {user.id})", inline=False)
embed.set_footer(text=ctx.author.id)
embed.set_author(name=user.id, icon_url=user.display_avatar.with_format("png"))
jdjg = self.bot.get_user(168422909482762240)
benitz = self.bot.get_user(529499034495483926)
await self.bot.get_channel(816807453215424573).send(content=f"{jdjg.mention} {benitz.mention}", embed=embed)
await ctx.reply(
f"It appears adding your bot worked. \nIf you leave your bot will be kicked, unless you have an alt there, a friend, etc. \n(It will be kicked to prevent raiding and taking up guild space if you leave). \nYour bot will be checked out. {jdjg} will then determine if your bot is good to add to the guild. Make sure to open your Dms to JDJG, so he can dm you about the bot being added. \nIf you don't add him, your bot will be denied."
)
@commands.command(
brief="a command that takes a url and sees if it's an image (requires embed permissions at the moment)."
)
async def image_check(self, ctx):
await ctx.send(
"Please wait for discord to edit your message, if it does error about not a valid image, please send a screenshot of your usage and the bot's message."
)
await asyncio.sleep(5)
images = list(filter(lambda e: e.type == "image", ctx.message.embeds))
if not images or not ctx.message.embeds:
return await ctx.send(
"you need to pass a url with an image, if you did, then please run again. This is a discord issue, and I do not want to wait for discord to change its message."
)
await ctx.send(f"You have {len(images)} / {len(ctx.message.embeds)} links that are valid images.")
@commands.command(brief="Gives info on npm packages")
async def npm(self, ctx, *, args=None):
if args:
npm_response = await self.bot.session.get(f"https://registry.npmjs.com/{args}")
if npm_response.ok:
npm_response = await npm_response.json()
data = utils.get_required_npm(npm_response)
await ctx.send(embed=utils.npm_create_embed(data))
else:
await ctx.send(
f"Could not find package **{args}** on npm.", allowed_mentions=discord.AllowedMentions.none()
)
else:
await ctx.send("Please look for a library to get the info of.")
@commands.cooldown(1, 30, BucketType.user)
@commands.command(
brief="runs some code in a sandbox(based on Soos's Run command)", aliases=["eval", "run", "sandbox"]
)
async def console(self, ctx, *, code: codeblock_converter = None):
if not code:
return await ctx.send("You need to give me some code to use, otherwise I can not determine what it is.")
if not code.language:
return await ctx.send("You Must provide a language to use")
if not code.content:
return await ctx.send("No code provided")
tio = await async_tio.Tio(session=self.bot.session)
output = await tio.execute(f"{code.content}", language=f"{code.language}")
text_returned = (
f"```{code.language}\n{output}```"
if len(f"{output}") < 200
else await utils.post(self.bot, code=f"{output}")
)
embed = discord.Embed(
title=f"Your code exited with code {output.exit_status}", description=f"{text_returned}", color=242424
)
embed.set_author(name=f"{ctx.author}", icon_url=ctx.author.display_avatar.url)
embed.set_footer(text="Powered by Tio.run")
await ctx.send(content="I executed your code in a sandbox", embed=embed)
async def setup(bot):
await bot.add_cog(Info(bot))
await bot.add_cog(DevTools(bot))
|
"""
Automatically document Augmax augmentations
including sample outputs
"""
from docutils import nodes
from sphinx.ext.autodoc.directive import AutodocDirective
import jax
import jax.numpy as jnp
import json
import augmax
from imageio import imread, imwrite
from pathlib import Path
import inspect
SEED = 42
N_IMGS = 3
def generate_images(augmentation, args, kwargs=None, to_float: bool = False):
kwargs = kwargs or {}
augname = augmentation.__name__
basedir = Path(__file__).parent.parent
image = imread(basedir / 'teddy.png')
keys = jax.random.split(jax.random.PRNGKey(SEED), N_IMGS)
transform = augmentation(*args, **kwargs)
if to_float:
transform = augmax.Chain(augmax.ByteToFloat(), transform)
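# vmap over the leading key axis applies the same pipeline to N_IMGS independent random draws; jit compiles the whole batch once.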
transform = jax.jit(jax.vmap(transform, (0, None)))
images = transform(keys, image)
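# Float outputs are assumed to lie in [0, 1] (see the commented-out asserts below), so scale back to uint8 before writing PNGs.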
if augname == 'ByteToFloat' or (to_float and augname not in ['Normalize']):
# assert images.min() >= 0.0, f"augmented images.min() = {images.min()}, which should not happen!"
# assert images.max() <= 1.0, f"augmented images.max() = {images.max()}, which should not happen!"
images = (images * 255.0).astype(jnp.uint8)
imgdir = Path(basedir / 'generated_imgs').absolute()
imgdir.mkdir(exist_ok=True)
imgnames = []
for i in range(N_IMGS):
imgname = str(imgdir / f'{augname}_{i}.png')
imwrite(imgname, images[i])
imgnames.append('/' + imgname)
return imgnames
class AutoAugmentation(AutodocDirective):
required_arguments = 1
optional_arguments = 10
def run(self):
# Leverage autoclass for the base documentation.
# To do so, we have to go incognito and change our name... ;)
self.name = 'autoclass'
augname, *args = self.arguments
args = json.loads('[' + ', '.join(args) + ']')
augmentation = getattr(augmax, augname)
self.arguments = [augname]
cls_result = super().run()
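# A leading 'flt' argument marks augmentations that need float input, so ByteToFloat gets prepended in generate_images.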
to_float = False
if args and args[0] == 'flt':
args = args[1:]
to_float = True
kwargs = {}
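# Force probabilistic augmentations (those exposing a p parameter) to always fire so the sample images show the effect.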
if 'p' in inspect.getfullargspec(augmentation).args:
kwargs['p'] = 1.0
images = generate_images(augmentation, args, kwargs, to_float=to_float)
figure = nodes.figure(align='center')
for img in images:
figure += nodes.image(uri=img)
argstrings = []
for arg in args:
argstrings.append(str(arg))
for argname, argval in kwargs.items():
argstrings.append(f'{argname}={argval}')
caption = f'Augmentation Examples for {augname}({", ".join(argstrings)})'
figure += nodes.caption(text=caption, align='center')
cls_result.insert(0, nodes.title(text=augname))
cls_result.append(figure)
entry = cls_result[2].children[0]
section = nodes.section('', *cls_result, ids=entry['ids'], names=[augname])
entry['ids'] = []
return [section]
def setup(app):
app.add_directive('autoaug', AutoAugmentation)
| """
Automatically document Augmax augmentations
including sample outputs
"""
from docutils import nodes
from sphinx.ext.autodoc.directive import AutodocDirective
import jax
import jax.numpy as jnp
import json
import augmax
from imageio import imread, imwrite
from pathlib import Path
import inspect
SEED = 42
N_IMGS = 3
def generate_images(augmentation, args, kwargs=None, to_float: bool = False):
kwargs = kwargs or {}
augname = augmentation.__name__
basedir = Path(__file__).parent.parent
image = imread(basedir / 'teddy.png')
keys = jax.random.split(jax.random.PRNGKey(SEED), N_IMGS)
transform = augmentation(*args, **kwargs)
if to_float:
transform = augmax.Chain(augmax.ByteToFloat(), transform)
transform = jax.jit(jax.vmap(transform, (0, None)))
images = transform(keys, image)
if augname == 'ByteToFloat' or (to_float and augname not in ['Normalize']):
# assert images.min() >= 0.0, f"augmented images.min() = {images.min()}, which should not happen!"
# assert images.max() <= 1.0, f"augmented images.max() = {images.max()}, which should not happen!"
images = (images * 255.0).astype(jnp.uint8)
imgdir = Path(basedir / 'generated_imgs').absolute()
imgdir.mkdir(exist_ok=True)
imgnames = []
for i in range(N_IMGS):
imgname = str(imgdir / f'{augname}_{i}.png')
imwrite(imgname, images[i])
imgnames.append('/' + imgname)
return imgnames
class AutoAugmentation(AutodocDirective):
required_arguments = 1
optional_arguments = 10
def run(self):
# Leverage autoclass for the base documentation.
# To do so, we have to go incognito and change our name... ;)
self.name = 'autoclass'
augname, *args = self.arguments
args = json.loads('[' + ', '.join(args) + ']')
augmentation = getattr(augmax, augname)
self.arguments = [augname]
cls_result = super().run()
to_float = False
if args and args[0] == 'flt':
args = args[1:]
to_float = True
kwargs = {}
if 'p' in inspect.getfullargspec(augmentation)[0]:
kwargs['p'] = 1.0
images = generate_images(augmentation, args, kwargs, to_float=to_float)
figure = nodes.figure(align='center')
for img in images:
figure += nodes.image(uri=img)
argstrings = []
for arg in args:
argstrings.append(str(arg))
for argname, argval in kwargs.items():
argstrings.append(f'{argname}={argval}')
caption = f'Augmentation Examples for {augname}({", ".join(argstrings)})'
figure += nodes.caption(text=caption, align='center')
cls_result.insert(0, nodes.title(text=augname))
cls_result.append(figure)
entry = cls_result[2].children[0]
section = nodes.section('', *cls_result, ids=entry['ids'], names=[augname])
entry['ids'] = []
return [section]
def setup(app):
app.add_directive('autoaug', AutoAugmentation)
|
import sys
sys.path.append("components/summarizer/pointer-generator")
import components.summarizer.summarizer_utils as sutils
import components.summarizer.story_converter as sconv
import pickle
import nltk.tokenize as tokenize
import os
from nltk.tokenize.moses import MosesDetokenizer
# Define which articles you want to summarize:
articles = [
"http://www.bbc.com/news/business-43967923",
"https://www.theguardian.com/technology/2018/may/02/tesla-loss-model-3-elon-musk",
"https://www.theguardian.com/world/2018/may/03/japan-robot-dogs-get-solemn-buddhist-send-off-at-funerals"
]
# Fetch the articles from the internet, and store them in a pickle:
print("Downloading articles...")
story_data = sutils.fetch_and_pickle_stories(articles, 'data/pickles/raw_stories.pickle', 'data/stories/', False)
print("Downloading articles DONE")
print("-"*100)
# Convert the articles into a format that the summarizer model can consume
print("Converting articles into binary format for summarization model...")
sconv.process_and_save_to_disk(story_data['stories'], "test.bin", "data/converted_articles")
print("Converting articles into binary format for summarization model DONE")
# Run summarizer model (uncomment different exp_names to run with different models):
DATA_DIR = 'data/'
summarizer_internal_pickle = f"{DATA_DIR}pickles/decoded_stories.pickle"
data_path = f"{DATA_DIR}converted_articles/chunked/test_*"
vocab_path = f"{DATA_DIR}summarizer_training_data/finished_files/vocab"
log_root = f"{DATA_DIR}summarizer_models"
exp_name = "more_coverage"
#exp_name = "no_coverage"
#exp_name = "some_coverage"
sutils.run_summarization_model_decoder(summarizer_internal_pickle, data_path=data_path,
vocab_path=vocab_path, log_root=log_root, exp_name=exp_name)
# Load the results of the summarizer model output:
with open(summarizer_internal_pickle, "rb") as pickle_file:
summarization_output = pickle.load(pickle_file)
print("-"*100)
print("-"*100)
print("-"*100)
print("Summaries generated by the neural summarizer - case insensitive:")
for s in summarization_output['summaries']:
print(s+"\n\n")
print("-"*100)
# Attempt to fix lower case to correct upper case:
tokenized_summaries = sutils.try_fix_upper_case_for_summaries(story_data['stories'], summarization_output['summaries_tokens'])
detokenizer = MosesDetokenizer()
detokenized_summaries = []
print("Summaries generated by the neural summarizer - casing fixed:")
for s in tokenized_summaries:
s_detok = detokenizer.detokenize(s, return_str=True)
detokenized_summaries.append(s_detok)
print(s_detok+"\n\n")
print("-"*100)
print("Look at baseline summaries")
print("-"*100)
print("Extractive summaries:\n")
print("-"*100)
for s1 in story_data['summaries_extractive']:
print(s1+"\n\n")
print("-"*100)
print("3 sentence summaries:\n")
print("-"*100)
for s2 in story_data['summaries_3sent']:
print(s2+"\n\n")
print("-"*100)
print("Load NER model")
import components.ner.NERutils as ner
print("-"*100)
all_orgs = []
# Run NER model:
for story in detokenized_summaries:
storyCombined = story.replace('\n', ' ')
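# The tagger expects one whitespace-tokenized sentence per line, so tokenize words and split sentences before writing the input file.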
print('RUNNING TOKENIZER')
storyTokenized = tokenize.word_tokenize(storyCombined)
print('SPLITTING SENTENCES LINE BY LINE')
split = ner.sentenceSplitter(storyTokenized)
with open(r'components/ner/input.txt', 'w') as inputFile:
ner.writeArticle(split, inputFile)
print('RUNNING MODEL')
os.system('python2.7 components/ner/tagger-master/tagger.py --model components/ner/tagger-master/models/english/ --input components/ner/input.txt --output components/ner/output.txt')
with open(r'components/ner/output.txt','r') as namedStory:
namedStory = namedStory.read().replace('\n', ' ')
print('NAMED ENTITIES:')
orgs = ner.findNamedEntities(namedStory.split(' '))
all_orgs.append(orgs)
print(orgs)
print("\n\n")
print("-"*100)
print("Results:")
for i, url in enumerate(articles):
print("-"*100)
print(f"{url}\n")
print(f"Neural summary: {detokenized_summaries[i]}\n")
print(f"3 sentence summary: {story_data["summaries_3sent"][i]}\n")
print(f"Organizations: {all_orgs[i]}")
print("-"*100)
print("-"*100)
| import sys
sys.path.append("components/summarizer/pointer-generator")
import components.summarizer.summarizer_utils as sutils
import components.summarizer.story_converter as sconv
import pickle
import nltk.tokenize as tokenize
import os
from nltk.tokenize.moses import MosesDetokenizer
# Define which articles you want to summarize:
articles = [
"http://www.bbc.com/news/business-43967923",
"https://www.theguardian.com/technology/2018/may/02/tesla-loss-model-3-elon-musk",
"https://www.theguardian.com/world/2018/may/03/japan-robot-dogs-get-solemn-buddhist-send-off-at-funerals"
]
# Fetch the articles from the internet, and store them in a pickle:
print("Downloading articles...")
story_data = sutils.fetch_and_pickle_stories(articles, 'data/pickles/raw_stories.pickle', 'data/stories/', False)
print("Downloading articles DONE")
print("-"*100)
# Convert the articles into a format that the summarizer model can consume
print("Converting articles into binary format for summarization model...")
sconv.process_and_save_to_disk(story_data['stories'], "test.bin", "data/converted_articles")
print("Converting articles into binary format for summarization model DONE")
# Run summarizer model (uncomment different exp_names to run with different models):
DATA_DIR = 'data/'
summarizer_internal_pickle = f"{DATA_DIR}pickles/decoded_stories.pickle"
data_path = f"{DATA_DIR}converted_articles/chunked/test_*"
vocab_path = f"{DATA_DIR}summarizer_training_data/finished_files/vocab"
log_root = f"{DATA_DIR}summarizer_models"
exp_name = "more_coverage"
#exp_name = "no_coverage"
#exp_name = "some_coverage"
sutils.run_summarization_model_decoder(summarizer_internal_pickle, data_path = data_path,
vocab_path = vocab_path, log_root = log_root, exp_name = exp_name)
# Load the results of the summarizer model output:
summarization_output = pickle.load(open(summarizer_internal_pickle, "rb" ))
print("-"*100)
print("-"*100)
print("-"*100)
print("Summaries generated by the neural summarizer - case insensitive:")
for s in summarization_output['summaries']:
print(s+"\n\n")
print("-"*100)
# Attempt to fix lower case to correct upper case:
tokenized_summaries = sutils.try_fix_upper_case_for_summaries(story_data['stories'], summarization_output['summaries_tokens'])
detokenizer = MosesDetokenizer()
detokenized_summaries = []
print("Summaries generated by the neural summarizer - casing fixed:")
for s in tokenized_summaries:
s_detok = detokenizer.detokenize(s, return_str=True)
detokenized_summaries.append(s_detok)
print(s_detok+"\n\n")
print("-"*100)
print("Look at baseline summaries")
print("-"*100)
print("Extractive summaries:\n")
print("-"*100)
for s1 in story_data['summaries_extractive']:
print(s1+"\n\n")
print("-"*100)
print("3 sentence summaries:\n")
print("-"*100)
for s2 in story_data['summaries_3sent']:
print(s2+"\n\n")
print("-"*100)
print("Load NER model")
import components.ner.NERutils as ner
print("-"*100)
all_orgs = []
# Run NER model:
for story in detokenized_summaries:
storyCombined = story.replace('\n', ' ')
print('RUNNING TOKENIZER')
storyTokenized = tokenize.word_tokenize(storyCombined)
print('SPLITTING SENTENCES LINE BY LINE')
split = ner.sentenceSplitter(storyTokenized)
inputFile = open(r'components/ner/input.txt','w')
ner.writeArticle(split,inputFile)
inputFile.close()
print('RUNNING MODEL')
os.system('python2.7 components/ner/tagger-master/tagger.py --model components/ner/tagger-master/models/english/ --input components/ner/input.txt --output components/ner/output.txt')
with open(r'components/ner/output.txt','r') as namedStory:
namedStory=namedStory.read().replace('\n', ' ')
print('NAMED ENTITIES:')
orgs = ner.findNamedEntities(namedStory.split(' '))
all_orgs.append(orgs)
print(orgs)
print("\n\n")
print("-"*100)
print("Results:")
for i, url in enumerate(articles):
print("-"*100)
print(f"{url}\n")
print(f"Neural summary: {detokenized_summaries[i]}\n")
print(f"3 sentence summary: {story_data['summaries_3sent'][i]}\n")
print(f"Organizations: {all_orgs[i]}")
print("-"*100)
print("-"*100)
|
import logging
from concurrent.futures import ThreadPoolExecutor
import pytest
from functools import partial
from ocs_ci.framework.testlib import ManageTest, tier4, tier4c
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources.pod import (
get_mds_pods,
get_mon_pods,
get_mgr_pods,
get_osd_pods,
get_plugin_pods,
get_rbdfsplugin_provisioner_pods,
get_cephfsplugin_provisioner_pods,
get_operator_pods,
)
from ocs_ci.utility.utils import TimeoutSampler
from ocs_ci.helpers import helpers, disruption_helpers
log = logging.getLogger(__name__)
@tier4
@tier4c
@pytest.mark.parametrize(
argnames=["interface", "resource_name"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, "mgr"], marks=pytest.mark.polarion_id("OCS-1135")
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "mon"], marks=pytest.mark.polarion_id("OCS-1121")
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "osd"], marks=pytest.mark.polarion_id("OCS-1128")
),
pytest.param(
*[constants.CEPHFILESYSTEM, "mgr"],
marks=pytest.mark.polarion_id("OCS-1107"),
),
pytest.param(
*[constants.CEPHFILESYSTEM, "mon"],
marks=pytest.mark.polarion_id("OCS-1094"),
),
pytest.param(
*[constants.CEPHFILESYSTEM, "osd"],
marks=pytest.mark.polarion_id("OCS-1100"),
),
pytest.param(
*[constants.CEPHFILESYSTEM, "mds"],
marks=pytest.mark.polarion_id("OCS-1114"),
),
],
)
class TestDaemonKillDuringCreationOperations(ManageTest):
"""
This class consists of tests which verify ceph daemon kills during
multiple operations - pod creation, PVC creation and IO
"""
num_of_pvcs = 6
pvc_size = 5
@pytest.fixture()
def setup(self, interface, multi_pvc_factory, pod_factory):
"""
Create PVCs and pods
"""
access_modes = [constants.ACCESS_MODE_RWO]
if interface == constants.CEPHFILESYSTEM:
access_modes.append(constants.ACCESS_MODE_RWX)
# Modify access_modes list to create rbd `block` type volume with
# RWX access mode. RWX is not supported in filesystem type rbd
if interface == constants.CEPHBLOCKPOOL:
access_modes.extend(
[
f"{constants.ACCESS_MODE_RWO}-Block",
f"{constants.ACCESS_MODE_RWX}-Block",
]
)
pvc_objs = multi_pvc_factory(
interface=interface,
project=None,
storageclass=None,
size=self.pvc_size,
access_modes=access_modes,
status=constants.STATUS_BOUND,
num_of_pvc=self.num_of_pvcs,
wait_each=False,
)
# Set volume mode on PVC objects
for pvc_obj in pvc_objs:
pvc_info = pvc_obj.get()
setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])
rwo_pvcs = [
pvc_obj
for pvc_obj in pvc_objs
if (pvc_obj.access_mode == constants.ACCESS_MODE_RWO)
]
rwx_pvcs = [
pvc_obj
for pvc_obj in pvc_objs
if (pvc_obj.access_mode == constants.ACCESS_MODE_RWX)
]
num_of_rwo_pvc = len(rwo_pvcs)
num_of_rwx_pvc = len(rwx_pvcs)
block_rwo_pvcs = []
for pvc_obj in rwo_pvcs[:]:
if pvc_obj.volume_mode == "Block":
block_rwo_pvcs.append(pvc_obj)
rwo_pvcs.remove(pvc_obj)
log.info(
f"Created {num_of_rwo_pvc} RWO PVCs in which "
f"{len(block_rwo_pvcs)} are rbd block type."
)
log.info(f"Created {num_of_rwx_pvc} RWX PVCs.")
# Select 3 PVCs for IO pods and the remaining PVCs to create new pods
if block_rwo_pvcs:
pvc_objs_for_io_pods = rwo_pvcs[0:1] + rwx_pvcs[0:1] + block_rwo_pvcs[0:1]
pvc_objs_new_pods = rwo_pvcs[1:] + rwx_pvcs[1:] + block_rwo_pvcs[1:]
else:
pvc_objs_for_io_pods = rwo_pvcs[0:2] + rwx_pvcs[0:1]
pvc_objs_new_pods = rwo_pvcs[2:] + rwx_pvcs[1:]
# Create one pod using each RWO PVC and two pods using each RWX PVC
# for running IO
io_pods = helpers.create_pods(pvc_objs_for_io_pods, pod_factory, interface, 2)
# Wait for pods to be in Running state
for pod_obj in io_pods:
helpers.wait_for_resource_state(
resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
)
pod_obj.reload()
log.info(f"Created {len(io_pods)} pods for running IO.")
return pvc_objs, io_pods, pvc_objs_new_pods, access_modes
def test_daemon_kill_during_pvc_pod_creation_and_io(
self, interface, resource_name, setup, multi_pvc_factory, pod_factory
):
"""
Kill 'resource_name' daemon while PVCs creation, pods
creation and IO operation are progressing.
"""
num_of_new_pvcs = 5
pvc_objs, io_pods, pvc_objs_new_pods, access_modes = setup
proj_obj = pvc_objs[0].project
storageclass = pvc_objs[0].storageclass
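        # Map each daemon name to a callable that lists its pods, so the pod
        # count for the disrupted resource can be compared before and after the
        # daemon kill.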
pod_functions = {
"mds": partial(get_mds_pods),
"mon": partial(get_mon_pods),
"mgr": partial(get_mgr_pods),
"osd": partial(get_osd_pods),
"rbdplugin": partial(get_plugin_pods, interface=interface),
"cephfsplugin": partial(get_plugin_pods, interface=interface),
"cephfsplugin_provisioner": partial(get_cephfsplugin_provisioner_pods),
"rbdplugin_provisioner": partial(get_rbdfsplugin_provisioner_pods),
"operator": partial(get_operator_pods),
}
executor = ThreadPoolExecutor(max_workers=len(io_pods))
disruption = disruption_helpers.Disruptions()
disruption.set_resource(resource=resource_name)
# Get number of pods of type 'resource_name'
resource_pods_num = len(pod_functions[resource_name]())
# Do setup for running IO on pods
log.info("Setting up pods for running IO")
for pod_obj in io_pods:
if pod_obj.pvc.volume_mode == "Block":
storage_type = "block"
else:
storage_type = "fs"
executor.submit(pod_obj.workload_setup, storage_type=storage_type)
# Wait for setup on pods to complete
for pod_obj in io_pods:
log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
for sample in TimeoutSampler(360, 2, getattr, pod_obj, "wl_setup_done"):
if sample:
log.info(
f"Setup for running IO is completed on pod " f"{pod_obj.name}."
)
break
log.info("Setup for running IO is completed on pods")
# Set daemon to be killed
disruption.select_daemon()
# Start creating new pods
log.info("Start creating new pods.")
bulk_pod_create = executor.submit(
helpers.create_pods, pvc_objs_new_pods, pod_factory, interface, 2
)
# Start creation of new PVCs
log.info("Start creating new PVCs.")
bulk_pvc_create = executor.submit(
multi_pvc_factory,
interface=interface,
project=proj_obj,
storageclass=storageclass,
size=self.pvc_size,
access_modes=access_modes,
access_modes_selection="distribute_random",
status="",
num_of_pvc=num_of_new_pvcs,
wait_each=False,
)
# Start IO on each pod
log.info("Start IO on pods")
for pod_obj in io_pods:
if pod_obj.pvc.volume_mode == "Block":
storage_type = "block"
else:
storage_type = "fs"
pod_obj.run_io(
storage_type=storage_type,
size="1G",
runtime=10,
fio_filename=f"{pod_obj.name}_io_file1",
)
log.info("IO started on all pods.")
# Kill daemon
disruption.kill_daemon()
# Getting result of PVC creation as list of PVC objects
pvc_objs_new = bulk_pvc_create.result()
# Confirm PVCs are Bound
for pvc_obj in pvc_objs_new:
helpers.wait_for_resource_state(
resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
)
pvc_obj.reload()
log.info("Verified: New PVCs are Bound.")
# Getting result of pods creation as list of Pod objects
pod_objs_new = bulk_pod_create.result()
# Verify new pods are Running
for pod_obj in pod_objs_new:
helpers.wait_for_resource_state(
resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
)
pod_obj.reload()
log.info("Verified: All new pods are Running.")
# Verify IO
log.info("Fetching IO results from IO pods.")
for pod_obj in io_pods:
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info(f"IOPs after FIO on pod {pod_obj.name}:")
            log.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            log.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
log.info("Verified IO result on IO pods.")
all_pod_objs = io_pods + pod_objs_new
# Fetch volume details from pods for the purpose of verification
node_pv_dict = {}
for pod in all_pod_objs:
pod_info = pod.get()
node = pod_info["spec"]["nodeName"]
pvc = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"]
for pvc_obj in pvc_objs:
if pvc_obj.name == pvc:
pvc_obj.reload()
pv = pvc_obj.backed_pv
break
if node in node_pv_dict:
node_pv_dict[node].append(pv)
else:
node_pv_dict[node] = [pv]
# Delete pods
for pod_obj in all_pod_objs:
pod_obj.delete(wait=False)
# Verify pods are deleted
for pod_obj in all_pod_objs:
pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
# Verify number of 'resource_name' type pods
final_resource_pods_num = len(pod_functions[resource_name]())
assert final_resource_pods_num == resource_pods_num, (
f"Total number of {resource_name} pods is not matching with "
f"initial value. Total number of pods before daemon kill: "
f"{resource_pods_num}. Total number of pods present now: "
f"{final_resource_pods_num}"
)
# Verify volumes are unmapped from nodes after deleting the pods
node_pv_mounted = helpers.verify_pv_mounted_on_node(node_pv_dict)
for node, pvs in node_pv_mounted.items():
assert not pvs, (
f"PVs {pvs} is still present on node {node} after "
f"deleting the pods."
)
log.info(
"Verified: mount points are removed from nodes after deleting " "the pods"
)
# Set volume mode on PVC objects
for pvc_obj in pvc_objs_new:
pvc_info = pvc_obj.get()
setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])
# Verify that PVCs are reusable by creating new pods
all_pvc_objs = pvc_objs + pvc_objs_new
pod_objs_re = helpers.create_pods(all_pvc_objs, pod_factory, interface, 2)
# Verify pods are Running
for pod_obj in pod_objs_re:
helpers.wait_for_resource_state(
resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
)
pod_obj.reload()
log.info("Successfully created new pods using all PVCs.")
# Select pods from newly created pods list to run IO
pod_objs_re_io = [
pod_obj
for pod_obj in pod_objs_re
if pod_obj.pvc
in helpers.select_unique_pvcs([pod_obj.pvc for pod_obj in pod_objs_re])
]
for pod_obj in pod_objs_re_io:
if pod_obj.pvc.volume_mode == "Block":
storage_type = "block"
else:
storage_type = "fs"
pod_obj.run_io(
storage_type=storage_type,
size="1G",
runtime=10,
fio_filename=f"{pod_obj.name}_io_file2",
)
log.info("Fetching IO results from newly created pods")
for pod_obj in pod_objs_re_io:
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info(f"IOPs after FIO on pod {pod_obj.name}:")
            log.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
            log.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
log.info("Verified IO result on newly created pods.")
| import logging
from concurrent.futures import ThreadPoolExecutor
import pytest
from functools import partial
from ocs_ci.framework.testlib import ManageTest, tier4, tier4c
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources.pod import (
get_mds_pods,
get_mon_pods,
get_mgr_pods,
get_osd_pods,
get_plugin_pods,
get_rbdfsplugin_provisioner_pods,
get_cephfsplugin_provisioner_pods,
get_operator_pods,
)
from ocs_ci.utility.utils import TimeoutSampler
from ocs_ci.helpers import helpers, disruption_helpers
log = logging.getLogger(__name__)
@tier4
@tier4c
@pytest.mark.parametrize(
argnames=["interface", "resource_name"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, "mgr"], marks=pytest.mark.polarion_id("OCS-1135")
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "mon"], marks=pytest.mark.polarion_id("OCS-1121")
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "osd"], marks=pytest.mark.polarion_id("OCS-1128")
),
pytest.param(
*[constants.CEPHFILESYSTEM, "mgr"],
marks=pytest.mark.polarion_id("OCS-1107"),
),
pytest.param(
*[constants.CEPHFILESYSTEM, "mon"],
marks=pytest.mark.polarion_id("OCS-1094"),
),
pytest.param(
*[constants.CEPHFILESYSTEM, "osd"],
marks=pytest.mark.polarion_id("OCS-1100"),
),
pytest.param(
*[constants.CEPHFILESYSTEM, "mds"],
marks=pytest.mark.polarion_id("OCS-1114"),
),
],
)
class TestDaemonKillDuringCreationOperations(ManageTest):
"""
This class consists of tests which verifies ceph daemon kill during
multiple operations - pods creation, PVC creation and IO
"""
num_of_pvcs = 6
pvc_size = 5
@pytest.fixture()
def setup(self, interface, multi_pvc_factory, pod_factory):
"""
Create PVCs and pods
"""
access_modes = [constants.ACCESS_MODE_RWO]
if interface == constants.CEPHFILESYSTEM:
access_modes.append(constants.ACCESS_MODE_RWX)
# Modify access_modes list to create rbd `block` type volume with
# RWX access mode. RWX is not supported in filesystem type rbd
if interface == constants.CEPHBLOCKPOOL:
access_modes.extend(
[
f"{constants.ACCESS_MODE_RWO}-Block",
f"{constants.ACCESS_MODE_RWX}-Block",
]
)
pvc_objs = multi_pvc_factory(
interface=interface,
project=None,
storageclass=None,
size=self.pvc_size,
access_modes=access_modes,
status=constants.STATUS_BOUND,
num_of_pvc=self.num_of_pvcs,
wait_each=False,
)
# Set volume mode on PVC objects
for pvc_obj in pvc_objs:
pvc_info = pvc_obj.get()
setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])
rwo_pvcs = [
pvc_obj
for pvc_obj in pvc_objs
if (pvc_obj.access_mode == constants.ACCESS_MODE_RWO)
]
rwx_pvcs = [
pvc_obj
for pvc_obj in pvc_objs
if (pvc_obj.access_mode == constants.ACCESS_MODE_RWX)
]
num_of_rwo_pvc = len(rwo_pvcs)
num_of_rwx_pvc = len(rwx_pvcs)
block_rwo_pvcs = []
for pvc_obj in rwo_pvcs[:]:
if pvc_obj.volume_mode == "Block":
block_rwo_pvcs.append(pvc_obj)
rwo_pvcs.remove(pvc_obj)
log.info(
f"Created {num_of_rwo_pvc} RWO PVCs in which "
f"{len(block_rwo_pvcs)} are rbd block type."
)
log.info(f"Created {num_of_rwx_pvc} RWX PVCs.")
# Select 3 PVCs for IO pods and the remaining PVCs to create new pods
if block_rwo_pvcs:
pvc_objs_for_io_pods = rwo_pvcs[0:1] + rwx_pvcs[0:1] + block_rwo_pvcs[0:1]
pvc_objs_new_pods = rwo_pvcs[1:] + rwx_pvcs[1:] + block_rwo_pvcs[1:]
else:
pvc_objs_for_io_pods = rwo_pvcs[0:2] + rwx_pvcs[0:1]
pvc_objs_new_pods = rwo_pvcs[2:] + rwx_pvcs[1:]
# Create one pod using each RWO PVC and two pods using each RWX PVC
# for running IO
io_pods = helpers.create_pods(pvc_objs_for_io_pods, pod_factory, interface, 2)
# Wait for pods to be in Running state
for pod_obj in io_pods:
helpers.wait_for_resource_state(
resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
)
pod_obj.reload()
log.info(f"Created {len(io_pods)} pods for running IO.")
return pvc_objs, io_pods, pvc_objs_new_pods, access_modes
def test_daemon_kill_during_pvc_pod_creation_and_io(
self, interface, resource_name, setup, multi_pvc_factory, pod_factory
):
"""
Kill 'resource_name' daemon while PVCs creation, pods
creation and IO operation are progressing.
"""
num_of_new_pvcs = 5
pvc_objs, io_pods, pvc_objs_new_pods, access_modes = setup
proj_obj = pvc_objs[0].project
storageclass = pvc_objs[0].storageclass
pod_functions = {
"mds": partial(get_mds_pods),
"mon": partial(get_mon_pods),
"mgr": partial(get_mgr_pods),
"osd": partial(get_osd_pods),
"rbdplugin": partial(get_plugin_pods, interface=interface),
"cephfsplugin": partial(get_plugin_pods, interface=interface),
"cephfsplugin_provisioner": partial(get_cephfsplugin_provisioner_pods),
"rbdplugin_provisioner": partial(get_rbdfsplugin_provisioner_pods),
"operator": partial(get_operator_pods),
}
executor = ThreadPoolExecutor(max_workers=len(io_pods))
disruption = disruption_helpers.Disruptions()
disruption.set_resource(resource=resource_name)
# Get number of pods of type 'resource_name'
resource_pods_num = len(pod_functions[resource_name]())
# Do setup for running IO on pods
log.info("Setting up pods for running IO")
for pod_obj in io_pods:
if pod_obj.pvc.volume_mode == "Block":
storage_type = "block"
else:
storage_type = "fs"
executor.submit(pod_obj.workload_setup, storage_type=storage_type)
# Wait for setup on pods to complete
for pod_obj in io_pods:
log.info(f"Waiting for IO setup to complete on pod {pod_obj.name}")
for sample in TimeoutSampler(360, 2, getattr, pod_obj, "wl_setup_done"):
if sample:
log.info(
f"Setup for running IO is completed on pod " f"{pod_obj.name}."
)
break
log.info("Setup for running IO is completed on pods")
# Set daemon to be killed
disruption.select_daemon()
# Start creating new pods
log.info("Start creating new pods.")
bulk_pod_create = executor.submit(
helpers.create_pods, pvc_objs_new_pods, pod_factory, interface, 2
)
# Start creation of new PVCs
log.info("Start creating new PVCs.")
bulk_pvc_create = executor.submit(
multi_pvc_factory,
interface=interface,
project=proj_obj,
storageclass=storageclass,
size=self.pvc_size,
access_modes=access_modes,
access_modes_selection="distribute_random",
status="",
num_of_pvc=num_of_new_pvcs,
wait_each=False,
)
# Start IO on each pod
log.info("Start IO on pods")
for pod_obj in io_pods:
if pod_obj.pvc.volume_mode == "Block":
storage_type = "block"
else:
storage_type = "fs"
pod_obj.run_io(
storage_type=storage_type,
size="1G",
runtime=10,
fio_filename=f"{pod_obj.name}_io_file1",
)
log.info("IO started on all pods.")
# Kill daemon
disruption.kill_daemon()
# Getting result of PVC creation as list of PVC objects
pvc_objs_new = bulk_pvc_create.result()
# Confirm PVCs are Bound
for pvc_obj in pvc_objs_new:
helpers.wait_for_resource_state(
resource=pvc_obj, state=constants.STATUS_BOUND, timeout=180
)
pvc_obj.reload()
log.info("Verified: New PVCs are Bound.")
# Getting result of pods creation as list of Pod objects
pod_objs_new = bulk_pod_create.result()
# Verify new pods are Running
for pod_obj in pod_objs_new:
helpers.wait_for_resource_state(
resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
)
pod_obj.reload()
log.info("Verified: All new pods are Running.")
# Verify IO
log.info("Fetching IO results from IO pods.")
for pod_obj in io_pods:
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info(f"IOPs after FIO on pod {pod_obj.name}:")
log.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
log.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
log.info("Verified IO result on IO pods.")
all_pod_objs = io_pods + pod_objs_new
# Fetch volume details from pods for the purpose of verification
node_pv_dict = {}
for pod in all_pod_objs:
pod_info = pod.get()
node = pod_info["spec"]["nodeName"]
pvc = pod_info["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"]
for pvc_obj in pvc_objs:
if pvc_obj.name == pvc:
pvc_obj.reload()
pv = pvc_obj.backed_pv
break
if node in node_pv_dict:
node_pv_dict[node].append(pv)
else:
node_pv_dict[node] = [pv]
# Delete pods
for pod_obj in all_pod_objs:
pod_obj.delete(wait=False)
# Verify pods are deleted
for pod_obj in all_pod_objs:
pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
# Verify number of 'resource_name' type pods
final_resource_pods_num = len(pod_functions[resource_name]())
assert final_resource_pods_num == resource_pods_num, (
f"Total number of {resource_name} pods is not matching with "
f"initial value. Total number of pods before daemon kill: "
f"{resource_pods_num}. Total number of pods present now: "
f"{final_resource_pods_num}"
)
# Verify volumes are unmapped from nodes after deleting the pods
node_pv_mounted = helpers.verify_pv_mounted_on_node(node_pv_dict)
for node, pvs in node_pv_mounted.items():
assert not pvs, (
f"PVs {pvs} is still present on node {node} after "
f"deleting the pods."
)
log.info(
"Verified: mount points are removed from nodes after deleting " "the pods"
)
# Set volume mode on PVC objects
for pvc_obj in pvc_objs_new:
pvc_info = pvc_obj.get()
setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])
# Verify that PVCs are reusable by creating new pods
all_pvc_objs = pvc_objs + pvc_objs_new
pod_objs_re = helpers.create_pods(all_pvc_objs, pod_factory, interface, 2)
# Verify pods are Running
for pod_obj in pod_objs_re:
helpers.wait_for_resource_state(
resource=pod_obj, state=constants.STATUS_RUNNING, timeout=90
)
pod_obj.reload()
log.info("Successfully created new pods using all PVCs.")
# Select pods from newly created pods list to run IO
pod_objs_re_io = [
pod_obj
for pod_obj in pod_objs_re
if pod_obj.pvc
in helpers.select_unique_pvcs([pod_obj.pvc for pod_obj in pod_objs_re])
]
for pod_obj in pod_objs_re_io:
if pod_obj.pvc.volume_mode == "Block":
storage_type = "block"
else:
storage_type = "fs"
pod_obj.run_io(
storage_type=storage_type,
size="1G",
runtime=10,
fio_filename=f"{pod_obj.name}_io_file2",
)
log.info("Fetching IO results from newly created pods")
for pod_obj in pod_objs_re_io:
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"FIO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info(f"IOPs after FIO on pod {pod_obj.name}:")
log.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
log.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
log.info("Verified IO result on newly created pods.")
|
import os
import sys
__all__ = [
"load_module",
"mytest_the_file",
]
def load_module(file_path, module_name=None):
"""
Load a module by name and search path
This function should work with python 2.7 and 3.x
Returns None if Module could not be loaded.
"""
if module_name is None:
module_name = os.path.basename(os.path.splitext(file_path)[0])
if sys.version_info >= (3, 5,):
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, file_path)
if not spec:
return
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
else:
import imp
mod = imp.load_source(module_name, file_path)
return mod
def mytest_the_file(filename):
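    # Load the module under test; when the filename contains "_t", also load
    # the reference module "<prefix>.py" from the same directory and validate
    # the test's Solution class against the reference one.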
file_dir, file_name = os.path.split(filename)
module_test = load_module(file_path=filename)
if "_t" in file_name:
        filename_val = os.path.join(file_dir, f"{file_name.split('_t')[0]}.py")
module_val = load_module(file_path=filename_val)
else:
module_val = module_test
module_val.Solution.check_class(module_test.Solution)
return True, file_name
| import os
import sys
__all__ = [
"load_module",
"mytest_the_file",
]
def load_module(file_path, module_name=None):
"""
Load a module by name and search path
This function should work with python 2.7 and 3.x
Returns None if Module could not be loaded.
"""
if module_name is None:
module_name = os.path.basename(os.path.splitext(file_path)[0])
if sys.version_info >= (3, 5,):
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, file_path)
if not spec:
return
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
else:
import imp
mod = imp.load_source(module_name, file_path)
return mod
def mytest_the_file(filename):
file_dir, file_name = os.path.split(filename)
module_test = load_module(file_path=filename)
if "_t" in file_name:
filename_val = os.path.join(file_dir, f"{file_name.split('_t')[0]}.py")
module_val = load_module(file_path=filename_val)
else:
module_val = module_test
module_val.Solution.check_class(module_test.Solution)
return True, file_name
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""Google Cloud function code to extract periodic transactions from data source."""
import base64
import json
import logging
import os
import sys
from typing import Any, Dict, Optional
from google.cloud.functions_v1.context import Context
from google.cloud import bigquery
import google.cloud.logging
from custom_functions import hook_get_load_predictions_query
from custom_functions import hook_get_bq_schema
from custom_functions import hook_apply_formulas
from custom_functions import hook_on_completion
# Set-up logging
logger = logging.getLogger('predict_transactions_batch')
logger.setLevel(logging.DEBUG)
handler = None
if os.getenv('LOCAL_LOGGING'):
handler = logging.StreamHandler(sys.stderr)
else:
client = google.cloud.logging.Client()
handler = google.cloud.logging.handlers.CloudLoggingHandler(client)
logger.addHandler(handler)
BQ_LTV_GCP_PROJECT = str(os.getenv("BQ_LTV_GCP_PROJECT", ""))
BQ_LTV_DATASET = str(os.getenv("BQ_LTV_DATASET", ""))
BQ_LTV_PREDICTIONS_TABLE = str(
os.getenv("BQ_LTV_PREDICTIONS_TABLE", ""))
DATAFRAME_PROCESSING_ENABLED = os.getenv('DATAFRAME_PROCESSING_ENABLED', 'Y')
def _load_data_from_bq(query):
"""Loads all the transactions from the table.
Args:
query: A string with the query to run on the table
Returns:
A dataframe with all the table data
"""
job_config = bigquery.job.QueryJobConfig()
return bigquery.Client(project=BQ_LTV_GCP_PROJECT).query(query, job_config=job_config).to_dataframe()
def _write_to_bigquery(df, table_name):
"""Writes the given dataframe into the BQ table.
Args:
df: A pandas dataframe representing the data to be written
table_name: A string representing the full path of the metadata BQ table
"""
dataframe = df
client = bigquery.Client(project=BQ_LTV_GCP_PROJECT)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = "WRITE_TRUNCATE"
job_config.schema = hook_get_bq_schema()
job = client.load_table_from_dataframe(
dataframe, table_name, job_config=job_config)
job.result()
table = client.get_table(table_name)
print("Loaded {} rows and {} columns to {}".format(table.num_rows,
len(table.schema),
table_name))
def _load_direct_to_bigquery(query, output_table):
"""Runs the load query and outputs the data directly to the next table in the pipeline.
Args:
query: string Prepared SQL query to load from the prediction output table
output_table: string Fully qualified name of the output BQ table where query
result is written.
Returns:
int Number of rows in the target table after job completion.
"""
# Set query to output directly to output table
job_config = bigquery.QueryJobConfig(
destination=output_table,
write_disposition='WRITE_TRUNCATE'
)
client = bigquery.Client()
job = client.query(query, job_config=job_config) # Make an API request.
job.result() # Wait for the job to complete.
table = client.get_table(output_table) # Make an API request.
print('Loaded {} rows and {} columns to {}'.format(table.num_rows,
len(table.schema),
output_table))
return table.num_rows
def _delete_dataset(dataset):
"""Deletes the dataset specified by the dataset parameter.
Args:
dataset: The name of the dataset to be deleted.
"""
client = bigquery.Client()
client.delete_dataset(
dataset, delete_contents=True, not_found_ok=True
)
def main(event: Dict[str, Any], context=Optional[Context]):
"""Checks if the data source table is available & no extract table generated.
Depending on the existence it will trigger the data transfer.
Args:
event (dict): The dictionary with data specific to this type of event.
The `data` field contains the PubsubMessage message. The `attributes`
field will contain custom attributes if there are any.
context (google.cloud.functions.Context): The Cloud Functions event
metadata. The `event_id` field contains the Pub/Sub message ID. The
`timestamp` field contains the publish time.
"""
del context
data = base64.b64decode(event["data"]).decode("utf-8")
msg = json.loads(data)
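  # The batch-prediction output dataset arrives as a "bq://project.dataset"
  # URI; strip the scheme to get a plain BigQuery dataset reference.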
input_dataset = (msg['operation']
['outputInfo']['bigqueryOutputDataset']).split("://")[1]
input_table = f"""{input_dataset}.predictions_*"""
  output_table = f'{BQ_LTV_GCP_PROJECT}.{BQ_LTV_DATASET}.{BQ_LTV_PREDICTIONS_TABLE}_{msg["date"]}'
dataframe_processing = not (DATAFRAME_PROCESSING_ENABLED == 'N')
query = hook_get_load_predictions_query(input_table)
if dataframe_processing:
_write_to_bigquery(
hook_apply_formulas(_load_data_from_bq(query)), output_table)
else:
_load_direct_to_bigquery(query, output_table)
_delete_dataset(input_dataset)
hook_on_completion(msg['date'])
def _test():
msg_data = base64.b64encode(bytes('{"payload": {"bq_input_to_predict_table": "decent-fulcrum-316414.test.filtered_periodic_transactions", "bq_output_table": "decent-fulcrum-316414.test.predictions", "date": "20210401", "operation": {"name": "projects/988912752389/locations/europe-west4/batchPredictionJobs/7138777155428679680", "displayName": "pablogil_test_pltv_batch_predict - 2021-06-17 13:27:04.054958", "model": "projects/988912752389/locations/europe-west4/models/7662206262901211136", "inputConfig": {"instancesFormat": "bigquery", "bigquerySource": {"inputUri": "bq://decent-fulcrum-316414.test.filtered_periodic_transactions_20210401"}}, "outputConfig": {"predictionsFormat": "bigquery", "bigqueryDestination": {"outputUri": "bq://decent-fulcrum-316414"}}, "dedicatedResources": {"machineSpec": {"machineType": "n1-highmem-8"}, "startingReplicaCount": 20, "maxReplicaCount": 20}, "manualBatchTuningParameters": {"batchSize": 100}, "outputInfo": {"bigqueryOutputDataset": "bq://decent-fulcrum-316414.prediction_automl_training_data_20200605_0608_2021_06_17T06_27_04_428Z"}, "state": "JOB_STATE_SUCCEEDED", "completionStats": {"successfulCount": "280"}, "createTime": "2021-06-17T13:27:04.571081Z", "startTime": "2021-06-17T13:27:05.550439Z", "endTime": "2021-06-17T13:44:29Z", "updateTime": "2021-06-17T13:45:41.481342Z"}}, "status_check_url": "https://europe-west4-aiplatform.googleapis.com/v1/projects/988912752389/locations/europe-west4/batchPredictionJobs/7138777155428679680", "success_topic": "pablogil_test.pltv.post_process_batch_predictions", "concurrent_slot_document": "pablogil_test_pltv_prediction_tracking/concurrent_document", "status_success_values": ["JOB_STATE_SUCCEEDED"], "status_error_values": ["JOB_STATE_FAILED", "JOB_STATE_EXPIRED"], "inserted_timestamp": "0", "error_topic": "pablogil_test.pltv.", "expiration_timestamp": "0", "status_field": "state", "updated_timestamp": "0", "source_topic": "pablogil_test.pltv.predict_transactions_batch"}'.encode("utf-8")))
main(
event={
"data": msg_data,
"attributes": {
"forwarded": "true"
}
},
context=None)
if __name__ == "__main__":
_test()
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""Google Cloud function code to extract periodic transactions from data source."""
import base64
import json
import logging
import os
import sys
from typing import Any, Dict, Optional
from google.cloud.functions_v1.context import Context
from google.cloud import bigquery
import google.cloud.logging
from custom_functions import hook_get_load_predictions_query
from custom_functions import hook_get_bq_schema
from custom_functions import hook_apply_formulas
from custom_functions import hook_on_completion
# Set-up logging
logger = logging.getLogger('predict_transactions_batch')
logger.setLevel(logging.DEBUG)
handler = None
if os.getenv('LOCAL_LOGGING'):
handler = logging.StreamHandler(sys.stderr)
else:
client = google.cloud.logging.Client()
handler = google.cloud.logging.handlers.CloudLoggingHandler(client)
logger.addHandler(handler)
BQ_LTV_GCP_PROJECT = str(os.getenv("BQ_LTV_GCP_PROJECT", ""))
BQ_LTV_DATASET = str(os.getenv("BQ_LTV_DATASET", ""))
BQ_LTV_PREDICTIONS_TABLE = str(
os.getenv("BQ_LTV_PREDICTIONS_TABLE", ""))
DATAFRAME_PROCESSING_ENABLED = os.getenv('DATAFRAME_PROCESSING_ENABLED', 'Y')
def _load_data_from_bq(query):
"""Loads all the transactions from the table.
Args:
query: A string with the query to run on the table
Returns:
A dataframe with all the table data
"""
job_config = bigquery.job.QueryJobConfig()
return bigquery.Client(project=BQ_LTV_GCP_PROJECT).query(query, job_config=job_config).to_dataframe()
def _write_to_bigquery(df, table_name):
"""Writes the given dataframe into the BQ table.
Args:
df: A pandas dataframe representing the data to be written
table_name: A string representing the full path of the metadata BQ table
"""
dataframe = df
client = bigquery.Client(project=BQ_LTV_GCP_PROJECT)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = "WRITE_TRUNCATE"
job_config.schema = hook_get_bq_schema()
job = client.load_table_from_dataframe(
dataframe, table_name, job_config=job_config)
job.result()
table = client.get_table(table_name)
print("Loaded {} rows and {} columns to {}".format(table.num_rows,
len(table.schema),
table_name))
def _load_direct_to_bigquery(query, output_table):
"""Runs the load query and outputs the data directly to the next table in the pipeline.
Args:
query: string Prepared SQL query to load from the prediction output table
output_table: string Fully qualified name of the output BQ table where query
result is written.
Returns:
int Number of rows in the target table after job completion.
"""
# Set query to output directly to output table
job_config = bigquery.QueryJobConfig(
destination=output_table,
write_disposition='WRITE_TRUNCATE'
)
client = bigquery.Client()
job = client.query(query, job_config=job_config) # Make an API request.
job.result() # Wait for the job to complete.
table = client.get_table(output_table) # Make an API request.
print('Loaded {} rows and {} columns to {}'.format(table.num_rows,
len(table.schema),
output_table))
return table.num_rows
def _delete_dataset(dataset):
"""Deletes the dataset specified by the dataset parameter.
Args:
dataset: The name of the dataset to be deleted.
"""
client = bigquery.Client()
client.delete_dataset(
dataset, delete_contents=True, not_found_ok=True
)
def main(event: Dict[str, Any], context=Optional[Context]):
"""Checks if the data source table is available & no extract table generated.
Depending on the existence it will trigger the data transfer.
Args:
event (dict): The dictionary with data specific to this type of event.
The `data` field contains the PubsubMessage message. The `attributes`
field will contain custom attributes if there are any.
context (google.cloud.functions.Context): The Cloud Functions event
metadata. The `event_id` field contains the Pub/Sub message ID. The
`timestamp` field contains the publish time.
"""
del context
data = base64.b64decode(event["data"]).decode("utf-8")
msg = json.loads(data)
input_dataset = (msg['operation']
['outputInfo']['bigqueryOutputDataset']).split("://")[1]
input_table = f"""{input_dataset}.predictions_*"""
output_table = f'{BQ_LTV_GCP_PROJECT}.{BQ_LTV_DATASET}.{BQ_LTV_PREDICTIONS_TABLE}_{msg["date"]}'
dataframe_processing = not (DATAFRAME_PROCESSING_ENABLED == 'N')
query = hook_get_load_predictions_query(input_table)
if dataframe_processing:
_write_to_bigquery(
hook_apply_formulas(_load_data_from_bq(query)), output_table)
else:
_load_direct_to_bigquery(query, output_table)
_delete_dataset(input_dataset)
hook_on_completion(msg['date'])
def _test():
msg_data = base64.b64encode(bytes('{"payload": {"bq_input_to_predict_table": "decent-fulcrum-316414.test.filtered_periodic_transactions", "bq_output_table": "decent-fulcrum-316414.test.predictions", "date": "20210401", "operation": {"name": "projects/988912752389/locations/europe-west4/batchPredictionJobs/7138777155428679680", "displayName": "pablogil_test_pltv_batch_predict - 2021-06-17 13:27:04.054958", "model": "projects/988912752389/locations/europe-west4/models/7662206262901211136", "inputConfig": {"instancesFormat": "bigquery", "bigquerySource": {"inputUri": "bq://decent-fulcrum-316414.test.filtered_periodic_transactions_20210401"}}, "outputConfig": {"predictionsFormat": "bigquery", "bigqueryDestination": {"outputUri": "bq://decent-fulcrum-316414"}}, "dedicatedResources": {"machineSpec": {"machineType": "n1-highmem-8"}, "startingReplicaCount": 20, "maxReplicaCount": 20}, "manualBatchTuningParameters": {"batchSize": 100}, "outputInfo": {"bigqueryOutputDataset": "bq://decent-fulcrum-316414.prediction_automl_training_data_20200605_0608_2021_06_17T06_27_04_428Z"}, "state": "JOB_STATE_SUCCEEDED", "completionStats": {"successfulCount": "280"}, "createTime": "2021-06-17T13:27:04.571081Z", "startTime": "2021-06-17T13:27:05.550439Z", "endTime": "2021-06-17T13:44:29Z", "updateTime": "2021-06-17T13:45:41.481342Z"}}, "status_check_url": "https://europe-west4-aiplatform.googleapis.com/v1/projects/988912752389/locations/europe-west4/batchPredictionJobs/7138777155428679680", "success_topic": "pablogil_test.pltv.post_process_batch_predictions", "concurrent_slot_document": "pablogil_test_pltv_prediction_tracking/concurrent_document", "status_success_values": ["JOB_STATE_SUCCEEDED"], "status_error_values": ["JOB_STATE_FAILED", "JOB_STATE_EXPIRED"], "inserted_timestamp": "0", "error_topic": "pablogil_test.pltv.", "expiration_timestamp": "0", "status_field": "state", "updated_timestamp": "0", "source_topic": "pablogil_test.pltv.predict_transactions_batch"}'.encode("utf-8")))
main(
event={
"data": msg_data,
"attributes": {
"forwarded": "true"
}
},
context=None)
if __name__ == "__main__":
_test()
|
import asyncio
from aiovk.longpoll import BotsLongPoll
import os
from aiovk import TokenSession, API
from dotenv import load_dotenv
from utils.message import Message
from vk_bot.bot import VkBot
load_dotenv()
ses = TokenSession(access_token=str(os.getenv('BOT_VK_KEY')))
api = API(ses)
lp = BotsLongPoll(api, int(os.getenv('GROUP_ID')), version=100)
loop = asyncio.get_event_loop()
async def write_msg(vk_bot: VkBot = None, message: Message = None, peer_id=None):
if vk_bot and vk_bot.text is not None:
await api.messages.send(peer_id=int(vk_bot.peer_id), message=vk_bot.text, keyboard=vk_bot.keyboard)
elif message:
await api.messages.send(peer_id=peer_id, message=message)
async def get_user_title(vk_bot: VkBot):
res = (await api.users.get(user_ids=int(vk_bot.from_id)))[0]
    return f"{res['first_name']} {res['last_name']}"
async def get_chat_title(vk_bot: VkBot = None, peer_id=None):
peer_id = vk_bot.peer_id if vk_bot else peer_id
convs = await api.messages.getConversationsById(peer_ids=peer_id)
for conv in convs['items']:
if conv['peer']['type'] == 'chat' and conv['peer']['id'] == peer_id:
return conv['chat_settings']['title']
async def edit_last_message(vk_bot: VkBot):
await api.messages.edit(peer_id=int(vk_bot.peer_id), message=vk_bot.text,
conversation_message_id=vk_bot.conversation_message_id)
| import asyncio
from aiovk.longpoll import BotsLongPoll
import os
from aiovk import TokenSession, API
from dotenv import load_dotenv
from utils.message import Message
from vk_bot.bot import VkBot
load_dotenv()
ses = TokenSession(access_token=str(os.getenv('BOT_VK_KEY')))
api = API(ses)
lp = BotsLongPoll(api, int(os.getenv('GROUP_ID')), version=100)
loop = asyncio.get_event_loop()
async def write_msg(vk_bot: VkBot = None, message: Message = None, peer_id=None):
if vk_bot and vk_bot.text is not None:
await api.messages.send(peer_id=int(vk_bot.peer_id), message=vk_bot.text, keyboard=vk_bot.keyboard)
elif message:
await api.messages.send(peer_id=peer_id, message=message)
async def get_user_title(vk_bot: VkBot):
res = (await api.users.get(user_ids=int(vk_bot.from_id)))[0]
return f"{res['first_name']} {res['last_name']}"
async def get_chat_title(vk_bot: VkBot = None, peer_id=None):
peer_id = vk_bot.peer_id if vk_bot else peer_id
convs = await api.messages.getConversationsById(peer_ids=peer_id)
for conv in convs['items']:
if conv['peer']['type'] == 'chat' and conv['peer']['id'] == peer_id:
return conv['chat_settings']['title']
async def edit_last_message(vk_bot: VkBot):
await api.messages.edit(peer_id=int(vk_bot.peer_id), message=vk_bot.text,
conversation_message_id=vk_bot.conversation_message_id)
|
import os
import subprocess
from utils import pColors, checkService, pullDockerImage, updateComposeFile, abort
def create_php_container(dockerClient):
    print(f"{pColors.HEADER}Creating the {os.getenv('PHP_IMAGE')} container (2/4){pColors.ENDC}")
# Here' we're pulling the needed image from Docker Hub
# Image name comes from the .env file
# It must be filled and correct!
pullDockerImage(dockerClient, os.getenv(
'PHP_IMAGE'), os.getenv('PHP_VERSION'))
# Update our compose file
# If not, it'll abort!
try:
updateComposeFile("./stack/php/docker-compose.yml.template", "./stack/php/docker-compose.yml",
os.getenv('PHP_SERVICE_NAME'), os.getenv('PHP_IMAGE'), os.getenv('PHP_VERSION'))
except:
print(f"{pColors.FAIL}Cannot create the docker-compose.{pColors.ENDC}")
abort()
print("Buiding the image!")
# Create the stack and deploy the service to our swarm
# This service must access to every nginx volumes
subprocess.run(["docker", "stack", "deploy", "-c",
"./stack/php/docker-compose.yml", "local"])
# Check if the container is successfully deployed!
# If not, it'll abort
    if checkService(dockerClient, f"local_{os.getenv('PHP_SERVICE_NAME')}") == False:
        print(
            f"{pColors.FAIL}Cannot detect the {os.getenv('PHP_SERVICE_NAME')} service!{pColors.ENDC}")
quit()
    print(f"{pColors.OKGREEN}{os.getenv('PHP_SERVICE_NAME')} container created ! (2/4){pColors.ENDC}")
| import os
import subprocess
from utils import pColors, checkService, pullDockerImage, updateComposeFile, abort
def create_php_container(dockerClient):
print(f"{pColors.HEADER}Creating the {os.getenv('PHP_IMAGE')} container (2/4){pColors.ENDC}")
# Here' we're pulling the needed image from Docker Hub
# Image name comes from the .env file
# It must be filled and correct!
pullDockerImage(dockerClient, os.getenv(
'PHP_IMAGE'), os.getenv('PHP_VERSION'))
# Update our compose file
# If not, it'll abort!
try:
updateComposeFile("./stack/php/docker-compose.yml.template", "./stack/php/docker-compose.yml",
os.getenv('PHP_SERVICE_NAME'), os.getenv('PHP_IMAGE'), os.getenv('PHP_VERSION'))
except:
print(f"{pColors.FAIL}Cannot create the docker-compose.{pColors.ENDC}")
abort()
print("Buiding the image!")
# Create the stack and deploy the service to our swarm
# This service must access to every nginx volumes
subprocess.run(["docker", "stack", "deploy", "-c",
"./stack/php/docker-compose.yml", "local"])
# Check if the container is successfully deployed!
# If not, it'll abort
if checkService(dockerClient, f"local_{os.getenv('PHP_SERVICE_NAME')}") == False:
print(
f"{pColors.FAIL}Cannot detect the {os.getenv('PHP_SERVICE_NAME')} service!{pColors.ENDC}")
quit()
print(f"{pColors.OKGREEN}{os.getenv('PHP_SERVICE_NAME')} container created ! (2/4){pColors.ENDC}")
|
from functools import reduce
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
received_time = []
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
try:
_time_.append(g[1])
except IndexError:
print(f'indexError on Time: {g}')
_time_.append('0')
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
def gosh_dist(_range):
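    # Pseudo-random integer in [0, _range), derived from modular exponentiation
    # over randomly drawn ranges (presumably a cheap task-distribution helper).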
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
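    # Expand a priority sequence into unit-time execution slots: cycle through
    # `seq`, emitting each job once per remaining WCET unit until the total
    # capacity is exhausted.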
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
total_received_task = 0
def edf():
global total_received_task
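    # Earliest-Deadline-First over one hyperperiod: release each task every
    # `period` up to the LCM of all periods, order the jobs by absolute
    # deadline, and hand any jobs that would miss their deadline to
    # cooperative_mec() for offloading.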
t_lcm = lcm([tasks[i]['period'] for i in tasks])
t_dead = {i: tasks[i]['deadline'] for i in tasks}
sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
# print(sorted_dead)
ready_task = []
for i in sorted_dead:
period = tasks[i[0]]['period']
# print('lcm: ', t_lcm, ' period: ', period)
t_range = int(t_lcm / period)
last_dead = 0
for j in range(t_range):
ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))
last_dead += period
ready_task = sorted(ready_task, key=lambda t: t[1])
print(ready_task)
t_time_ = 0
schedule = []
missed = []
register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
for i in ready_task:
if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
t_time_ += 1
# schedule.append(('idle', t_time))
if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:
if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
register[i[0]] += 1
t_time_ += tasks[i[0]]['wcet']
schedule.append(i[0])
else:
print('Deadline missed: ', i)
missed.append(i[0])
# print('s : ', schedule)
# print('r: ', register)
if len(missed) > 0:
# print('missed deadline: ', missed)
cooperative_mec(missed)
_edf_ = task_time_map(schedule, tasks)
total_received_task += len(_edf_)
return _edf_
# generate execution sequence
def wait_die(processes, avail, n_need, allocat):
global deadlock
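    # Wait-die deadlock avoidance: when resources are insufficient, an older
    # process (lower index) is allowed to wait; a younger process "dies" and is
    # offloaded instead of waiting.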
offload = []
# To store execution sequence
exec_seq = []
# Make a copy of available resources
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
while 'w' or 0 in work:
if 0 in work:
ind = work.index(0)
i = processes[ind]
elif 'w' in work:
# print('wk: ', work)
ind = work.index('w')
i = processes[ind]
else:
break
# print('comparing| process: ', i, _need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
# print('added: ', exec_seq)
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', _need[_max])
if processes.index(_max) > processes.index(i): # if true, i is older
# if process is already waiting then offload process
if work[ind] == 'w':
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload reentry: ', i, offload)
else:
# wait put process to waiting
work[processes.index(i)] = 'w'
# print('waiting: ', i)
else:
# abort i
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload: ', i)
if len(offload) > 0:
# print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
# print('Execution seq: ', exec_seq)
return exec_seq
def get_exec_seq(pro):
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wait_die(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
# waiting time = total waiting time ÷ 2 average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
address[0])) # calcuate moving average of mec wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
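    # Offload policy: with no MEC wait-time data yet, send the task straight to
    # the cloud; otherwise pick the MEC with the smallest recent waiting time
    # and use it when that wait beats the task's latency entry in t_time (or
    # its RTT beats the cloud RTT), falling back to the cloud.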
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
            _send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
offload_check[1] += len(o)
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results edf+wait-die {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_16_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
list_result = [
f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} \noff_cloud{_id_}_16_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_16_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}",
f"\ntask_received{_id_}_16_{mec_no} = {total_received_task} \nsent_t{_id_}_16_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
else:
os.system(f'mkdir -p {path_}')
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_16_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_16_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_16_{mec_no}datap.py", f"mec@{hosts["osboxes-0"]}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
def start_loop():
global _loc
global tasks
global t_time
global node_id
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
input('start..')
print('========= Waiting for tasks ==========')
_time_ = dt.datetime.now()
while True:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
list_seq = get_exec_seq(edf())
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
                send_message('wt {} 0.0'.format(ip_address()))
time.sleep(.4)
now = dt.datetime.now()
delta = now - _time_
if delta > dt.timedelta(minutes=4):
                print('terminating programme: 4 mins elapsed without tasks')
                stop = True
break
except KeyboardInterrupt:
print('\nProgramme Terminated')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
break
print('algo stopped!')
class BrokerSend:
def __init__(self, user, pw, ip, sub_topic, data):
self.user = user
self.pw = pw
self.ip = ip
self.port = 1883
self.topic = sub_topic
self.response = None
self.client = mqtt.Client()
self.client.username_pw_set(self.user, self.pw)
self.client.connect(self.ip, self.port, 60)
self.data = data
def publish(self):
self.client.publish(self.topic, self.data)
def __del__(self):
print('BrokerSend Object Deleted!')
def run_me(mec_no_, send_path, broker_ip_): # call this from agent
global discovering
global mec_no
global host_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
mec_no = mec_no_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
os.system(f'echo {mec_no}/{send_path} >> started.txt')
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 30))
data = pickle.dumps([get_hostname(), host_ip])
broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111', 'data': data}
BrokerSend(**broker_dict).publish()
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
global hosts
global cloud_ip
# (--n, --mec_no_, --cloud_ip, --s_path, --b_ip) send_path = f'/home/mec/result/{kind}/{count}'
mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',
'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',
'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',
}
gui = {'osboxes-0': '192.168.122.110'}
cloud_ips = ['192.168.200.11', '192.168.200.12']
b_ip = '192.168.122.111'
parser = argparse.ArgumentParser()
    parser.add_argument('--n', type=int, default=1, help='Number of MEC nodes')
parser.add_argument('--p', type=str, default='/home/mec/result/python', help='Path to send result: homo_1')
args = parser.parse_args()
kind, count = args.p.split('_')
send_path = f'/home/mec/result/{kind}/{count}'
ho = sorted(list(mec_nodes))[:args.n - 1]
    hosts = {**{host: mec_nodes[host] for host in ho if host != get_hostname()}, **gui}
ho += ['osboxes-0']
cloud_ip = cloud_ips[ho.index(get_hostname()) % 2]
os.system('clear')
run_me(mec_no_=args.n, send_path=send_path, broker_ip_=b_ip)
if __name__ == '__main__':
main()
| from functools import reduce
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1]  # keeps count of how many deadlocks are resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {task: wait_time}] => records tasks re-offloaded to this MEC for execution
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
received_time = []
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
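# Example of the parsing above (assuming Linux iputils output): `ping -c 1 host`
# ends with a summary line such as
#   rtt min/avg/max/mdev = 0.042/0.042/0.042/0.000 ms
# output[-2] selects that line (output[-1] is the empty string left by the
# trailing newline), and split('=')[-1].split('/')[0] extracts the min RTT
# ('0.042', in milliseconds); if the line is absent or non-numeric, the
# ValueError branch returns None.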
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
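# Note: each ip_set entry is the comma-joined address list of one interface,
# so membership tests like `address[0] not in ip_set` (used by the receiver
# threads) assume every interface carries a single IPv4 address.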
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
try:
_time_.append(g[1])
except IndexError:
print(f'indexError on Time: {g}')
_time_.append('0')
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
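# Example: the EDF hyperperiod for a task set like `_tasks` above is
#   lcm([20, 5, 10, 10, 15]) == 60
# i.e. the schedule repeats every 60 time units, and each task releases
# hyperperiod / period jobs per hyperperiod (t2 releases 60 / 5 = 12 jobs).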
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
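# Example: task_time_map() expands a job-level sequence into unit-time slices,
# taking one WCET unit from each job per round-robin pass over `seq`:
#   task_time_map(['t2', 't3'], {'t2': {'wcet': 1}, 't3': {'wcet': 2}})
#   -> ['t2', 't3', 't3']
# Note that the 'wcet' entries in `process` are decremented in place.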
total_received_task = 0
def edf():
global total_received_task
t_lcm = lcm([tasks[i]['period'] for i in tasks])
t_dead = {i: tasks[i]['deadline'] for i in tasks}
sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
# print(sorted_dead)
ready_task = []
for i in sorted_dead:
period = tasks[i[0]]['period']
# print('lcm: ', t_lcm, ' period: ', period)
t_range = int(t_lcm / period)
last_dead = 0
for j in range(t_range):
ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))
last_dead += period
ready_task = sorted(ready_task, key=lambda t: t[1])
print(ready_task)
t_time_ = 0
schedule = []
missed = []
register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
for i in ready_task:
if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
t_time_ += 1
# schedule.append(('idle', t_time))
if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:
if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
register[i[0]] += 1
t_time_ += tasks[i[0]]['wcet']
schedule.append(i[0])
else:
print('Deadline missed: ', i)
missed.append(i[0])
# print('s : ', schedule)
# print('r: ', register)
if len(missed) > 0:
# print('missed deadline: ', missed)
cooperative_mec(missed)
_edf_ = task_time_map(schedule, tasks)
total_received_task += len(_edf_)
return _edf_
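# Example (using the `_tasks` values above as an illustration): edf() releases
# every job in one hyperperiod (60 units) and sorts them by absolute deadline;
# t2 (period 5, deadline 4) contributes jobs with deadlines 4, 9, 14, ..., 59.
# A job is scheduled only if its WCET fits before its deadline; jobs that
# cannot fit are reported as missed and handed to cooperative_mec().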
# generate execution sequence
def wait_die(processes, avail, n_need, allocat):
global deadlock
offload = []
# To store execution sequence
exec_seq = []
# Make a copy of available resources
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
    while ('w' in work) or (0 in work):
if 0 in work:
ind = work.index(0)
i = processes[ind]
elif 'w' in work:
# print('wk: ', work)
ind = work.index('w')
i = processes[ind]
else:
break
# print('comparing| process: ', i, _need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
# print('added: ', exec_seq)
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', _need[_max])
if processes.index(_max) > processes.index(i): # if true, i is older
# if process is already waiting then offload process
if work[ind] == 'w':
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload reentry: ', i, offload)
else:
# wait put process to waiting
work[processes.index(i)] = 'w'
# print('waiting: ', i)
else:
# abort i
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload: ', i)
if len(offload) > 0:
# print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
# print('Execution seq: ', exec_seq)
return exec_seq
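# Wait-die rule as implemented above: a process's position in `processes`
# acts as its timestamp, so a lower index means an older process. When task i
# is blocked by `_max` (the task holding the most resources): if _max is
# younger than i, i waits ('w' in `work`), and if i was already waiting it is
# offloaded on re-entry; if _max is older, i "dies" immediately, i.e. it is
# appended to `offload` for cooperative execution.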
def get_exec_seq(pro):
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wait_die(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # broadcast half of the cumulative waiting time; the strict average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
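# Example: waiting times accumulate along the safe sequence. With
#   t_time = {'t1': [2.0, ...], 't2': [1.0, ...]} and list_seq = ['t1_0', 't2_1']
# the result is {'t1_0': 2.0, 't2_1': 3.0}, and half of the last entry
# (3.0 / 2 = 1.5) is broadcast as this node's estimated waiting time.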
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
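# Example of the incremental mean mu_n = ((n - 1) * mu_(n-1) + x_n) / n used
# above: with mec_waiting_time[ma1] == [2.0], calculate_mov_avg(ma1, 4.0)
# returns ((2 - 1) * 2.0 + 4.0) / 2 = 3.0.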
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of MEC wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
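# Example: mec_comparison() returns the MEC whose latest moving-average wait
# time is smallest; with
#   mec_waiting_time = {'ip_a': [1.2, 0.9], 'ip_b': [0.5, 1.4]}
# it returns 'ip_a' (0.9 < 1.4), and 0 when no wait times are known yet.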
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
                offload_check[1] += len(o[0])
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results edf+wait-die {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_16_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
list_result = [
f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} \noff_cloud{_id_}_16_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_16_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}",
f"\ntask_received{_id_}_16_{mec_no} = {total_received_task} \nsent_t{_id_}_16_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
else:
os.system(f'mkdir -p {path_}')
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_16_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_16_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_16_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
def start_loop():
global _loc
global tasks
global t_time
global node_id
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
input('start..')
print('========= Waiting for tasks ==========')
_time_ = dt.datetime.now()
while True:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
list_seq = get_exec_seq(edf())
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
                send_message('wt {} 0.0'.format(ip_address()))
time.sleep(.4)
now = dt.datetime.now()
delta = now - _time_
if delta > dt.timedelta(minutes=4):
                print('terminating programme: 4 mins elapsed without tasks')
                stop = True
break
except KeyboardInterrupt:
print('\nProgramme Terminated')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
break
print('algo stopped!')
class BrokerSend:
def __init__(self, user, pw, ip, sub_topic, data):
self.user = user
self.pw = pw
self.ip = ip
self.port = 1883
self.topic = sub_topic
self.response = None
self.client = mqtt.Client()
self.client.username_pw_set(self.user, self.pw)
self.client.connect(self.ip, self.port, 60)
self.data = data
def publish(self):
self.client.publish(self.topic, self.data)
def __del__(self):
print('BrokerSend Object Deleted!')
def run_me(mec_no_, send_path, broker_ip_): # call this from agent
global discovering
global mec_no
global host_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
mec_no = mec_no_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
os.system(f'echo {mec_no}/{send_path} >> started.txt')
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 30))
data = pickle.dumps([get_hostname(), host_ip])
broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111', 'data': data}
BrokerSend(**broker_dict).publish()
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
global hosts
global cloud_ip
# (--n, --mec_no_, --cloud_ip, --s_path, --b_ip) send_path = f'/home/mec/result/{kind}/{count}'
mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',
'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',
'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',
}
gui = {'osboxes-0': '192.168.122.110'}
cloud_ips = ['192.168.200.11', '192.168.200.12']
b_ip = '192.168.122.111'
parser = argparse.ArgumentParser()
    parser.add_argument('--n', type=int, default=1, help='Number of MEC nodes')
parser.add_argument('--p', type=str, default='/home/mec/result/python', help='Path to send result: homo_1')
args = parser.parse_args()
kind, count = args.p.split('_')
send_path = f'/home/mec/result/{kind}/{count}'
ho = sorted(list(mec_nodes))[:args.n - 1]
    hosts = {**{host: mec_nodes[host] for host in ho if host != get_hostname()}, **gui}
ho += ['osboxes-0']
cloud_ip = cloud_ips[ho.index(get_hostname()) % 2]
os.system('clear')
run_me(mec_no_=args.n, send_path=send_path, broker_ip_=b_ip)
if __name__ == '__main__':
main()
|
import logging
from pathlib import Path
import re
import scipy.stats as ss
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.stats.multitest as multitest
import sklearn.metrics
from intermine.webservice import Service
import biclust_comp.utils as utils
def plot_sample_enrichment_impc(X_file, max_factors=None, max_traits=None):
sample_info = read_sample_info_IMPC("data/real/IMPC/sample_info.txt")
X = utils.read_matrix_tsv(X_file)
trait_dummies = pd.get_dummies(sample_info[['tissue', 'genotype']])
return plot_enrichment(trait_dummies, X, max_factors, max_traits)
def plot_pathway_enrichment(B_file, gene_ensembl_ids_file,
full_pathways_file="analysis/IMPC/full_pathways.tsv",
max_factors=None, max_pathways=None):
with open(gene_ensembl_ids_file) as f:
gene_ensembl_ids = [line.strip() for line in f.readlines()]
B = pd.read_csv(B_file, sep="\t")
full_pathways_df = pd.read_csv(full_pathways_file, sep="\t")
pathways_df = construct_pathways_df(gene_ensembl_ids, full_pathways_df)
return plot_enrichment(pathways_df, B, max_factors, max_pathways)
def construct_ko_pathways_df():
sample_info = read_sample_info_IMPC("data/real/IMPC/sample_info.txt")
service = Service("http://www.mousemine.org/mousemine/service")
knocked_out_genes = []
for genotype in sample_info.genotype.unique():
match = re.match(r"(.*) knockout", genotype)
if match:
knocked_out_genes.append(match[1])
ko_genes_pathways = {}
pathway_names_dict = {}
for knocked_out_gene in knocked_out_genes:
query = service.new_query("ProteinCodingGene")
query.add_view("pathways.identifier", "pathways.name", "symbol")
query.add_constraint("symbol", "=", knocked_out_gene)
pathways = [f"{row["pathways.name"]}_-_{row["pathways.identifier"]}" for row in query.rows()]
ko_genes_pathways[knocked_out_gene] = pathways
for row in query.rows():
pathway_names_dict[row["pathways.identifier"]] = row["pathways.name"]
ko_genes_pathways_df = utils.transform_dict_to_count_df(ko_genes_pathways)
return ko_genes_pathways_df, pathway_names_dict
def construct_full_pathways_df(pathways):
service = Service("http://www.mousemine.org/mousemine/service")
pathways_dict = {}
for pathway in pathways:
query = service.new_query("Pathway")
query.add_view(
"genes.primaryIdentifier", "genes.symbol", "genes.name",
"genes.sequenceOntologyTerm.name", "genes.chromosome.primaryIdentifier"
)
query.add_constraint("identifier", "=", pathway)
pathways_dict[pathway] = [row["genes.primaryIdentifier"]
for row in query.rows()]
pathways_df = utils.transform_dict_to_count_df(pathways_dict).T
return pathways_df
def construct_pathways_df(gene_ensembl_ids, full_pathways_df,
ensembl_to_mgi_file="analysis/mart_export.txt"):
ensembl_to_mgi = pd.read_csv(ensembl_to_mgi_file,
sep="\t",
index_col=0)
pathways_df = pd.DataFrame(index=gene_ensembl_ids,
columns=full_pathways_df.columns,
dtype=int,
data=0)
for ensembl_id in gene_ensembl_ids:
unversioned_id = ensembl_id.split('.')[0]
try:
mgi_id = ensembl_to_mgi.loc[unversioned_id, 'MGI ID']
if isinstance(mgi_id, str) and mgi_id.startswith('MGI'):
pass
else:
raise KeyError
        except KeyError as e:
            print(f"Unable to translate ID {ensembl_id}")
            continue
try:
pathways_df.loc[ensembl_id, :] = full_pathways_df.loc[mgi_id, :]
except KeyError as e:
print(f"MGI ID not found in pathways matrix {mgi_id}")
return pathways_df
def plot_enrichment(trait_df, factor_df, max_factors, max_traits):
    f1_scores, intersections, _fisher_pvals, _odds_ratios = calculate_trait_enrichment(factor_df, trait_df)
if max_factors:
num_factors = min(factor_df.shape[1], max_factors)
else:
num_factors = factor_df.shape[1]
if max_traits:
num_traits = min(trait_df.shape[1], max_traits)
else:
num_traits = trait_df.shape[1]
# Sort the columns and rows by maximum f1 score, so that the factors with
# best enrichment will be left-most in the chart, and traits with best
# enrichment will be highest in the chart
ordered_columns = sorted(list(f1_scores.columns),
key=lambda k: f1_scores.iloc[:, k].max(),
reverse=True)
ordered_rows = sorted(list(f1_scores.index),
key=lambda row: f1_scores.loc[row, :].max(),
reverse=True)
intersections.loc['total', :] = (factor_df != 0).sum()
f1_scores.loc['total', :] = 0
ordered_rows.insert(0, 'total')
ordered_intersections = intersections.loc[ordered_rows, ordered_columns]
ordered_intersections.insert(0, 'total', trait_df.sum())
ordered_f1_scores = f1_scores.loc[ordered_rows, ordered_columns]
ordered_f1_scores.insert(0, 'total', 0)
fig, ax = plt.subplots(figsize=(num_factors * 0.7 + 3,
num_traits * 0.7))
# Colour each square by the F1 score
plt.imshow(ordered_f1_scores.iloc[:num_traits + 1, :num_factors + 1],
aspect='auto',
cmap='Blues')
# Sort out axis labels
ax.set_yticks(np.arange(num_traits + 1))
ax.set_xticks(np.arange(num_factors + 1))
ax.set_yticklabels(ordered_f1_scores.index)
ax.set_xticklabels(ordered_f1_scores.columns)
# Add text that notes the number of samples in intersection of trait and factor
threshold_black = 0.5
for j in range(num_factors + 1):
for i in range(num_traits + 1):
value = ordered_intersections.iloc[i, j]
opacity = ordered_f1_scores.iloc[i, j]
if opacity < threshold_black and value != 0:
color="black"
else:
color="white"
text = ax.text(j, i, value,
ha="center", va="center", color=color)
plt.axvline(x=0.5, color='black')
plt.axhline(y=0.5, color='black')
plt.colorbar()
fig.tight_layout()
plt.show()
return ordered_f1_scores, ordered_intersections
def calculate_trait_enrichment(factor_df, trait_df):
f1_scores = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
fisher_pvals = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
odds_ratios = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
intersections = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=int)
for trait_name, trait_column in trait_df.items():
for factor_index, factor_column in factor_df.items():
total_from_trait = trait_column.sum()
total_population = len(trait_column)
factor_size = (factor_column != 0).sum()
trait_non_zero = np.where(trait_column)[0]
intersection_size = ((factor_column.iloc[trait_non_zero]) != 0).sum()
trait_size = trait_column.sum()
intersections.loc[trait_name, factor_index] = intersection_size
f1_scores.loc[trait_name, factor_index] = sklearn.metrics.f1_score(trait_column,
factor_column != 0)
# sf is the 'survival' function i.e. 1 - cdf
# So we are finding the probability that the intersection size is at least
# equal to the intersection size we have observed, under the assumption that this
# has Hypergeometric distribution with M=total_population, n=trait_size and N=factor_size
# where M is 'total number of objects in the bin', N is 'number of objects we pick'
# n is 'total number of objects which are successes' and
# m is 'number of objects we pick which are successes'
fisher_pvals.loc[trait_name, factor_index] = ss.hypergeom.sf(intersection_size - 1,
total_population,
trait_size,
factor_size)
odds_in_factor = intersection_size / (factor_size - intersection_size)
notfactor_nottrait = total_population - trait_size - factor_size + intersection_size
odds_out_of_factor = (trait_size - intersection_size) / notfactor_nottrait
odds_ratios.loc[trait_name, factor_index] = odds_in_factor / odds_out_of_factor
_reject, corrected_fisher_pvals = utils.correct_multiple_testing(fisher_pvals)
return f1_scores, intersections, corrected_fisher_pvals, odds_ratios
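# Example: ss.hypergeom.sf(k - 1, M, n, N) above is P(X >= k) for the overlap
# X between a trait of size n and a factor of size N drawn from M samples.
# With M=10, n=4, N=5 and an observed overlap of k=4:
#   P(X >= 4) = C(4,4) * C(6,1) / C(10,5) = 6 / 252 ~= 0.0238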
def summarise_enrichment(sort_measure_name, measures_dict, factor_df, trait_df):
trait_enrichment_dicts = []
sort_measure_df = measures_dict[sort_measure_name]
for trait in sort_measure_df.index:
best_factor = sort_measure_df.loc[trait, :].argmax()
trait_enrichment_dict = {'trait': trait,
'best factor (by F1 score)': best_factor,
'factor size': (factor_df.loc[:, best_factor] != 0).sum(),
'trait size': (trait_df.loc[:, trait] != 0).sum()}
for measure, measure_df in measures_dict.items():
trait_enrichment_dict[measure] = measure_df.loc[trait, best_factor]
trait_enrichment_dicts.append(trait_enrichment_dict)
return pd.DataFrame(trait_enrichment_dicts)
def read_sample_info_IMPC(filename, read_ID=False):
sample_info = pd.read_csv(filename, sep="\t")
sample_info['genotype'] = sample_info['Factor Value[genotype]']
sample_info['tissue'] = sample_info['Factor Value[organism part]']
if read_ID:
sample_info['ID'] = sample_info['Comment[ENA_SAMPLE]']
return sample_info
def summarise_pathways_summary_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'pathways_summary{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"pathways_summary{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/pathways_summary(.*).tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
pathways = pd.read_csv(str(file), sep="\t", header=0)
# Mean (over factors) of log10 of the smallest p-value
run_info['factors_pathways_mean_min_pval'] = np.log10(pathways['min_pval']).mean()
for alpha_col in pathways.columns[pathways.columns.str.startswith('alpha')]:
# For each threshold, the mean (over factors) number of pathways significant at that threshold and
# the proportion of factors that had at least one pathway significant at that threshold
run_info[f"factors_pathways_mean_{alpha_col}"] = pathways[alpha_col].mean()
run_info[f"factors_pathways_nz_{alpha_col}"] = (pathways[alpha_col] != 0).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_traits_summary_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'traits_summary{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"traits_summary{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/traits_summary(.*).tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
traits = pd.read_csv(str(file), sep="\t", header=0)
tissue_rows = traits['trait'].str.startswith('tissue')
genotype_rows = traits['trait'].str.startswith('genotype')
# Mean (over traits) of f1 score from best factor, mean (over traits) of log of Fisher exact p-value
# (again from best factor), min p-value (min over traits, of p-value from best factor), max (over traits)
# of f1 score from best factor
run_info['traits_mean_f1_score'] = traits.loc[:, 'F1 score'].mean()
run_info['traits_mean_log10_pval'] = np.log10(traits.loc[:, 'Fisher\'s exact test']).mean()
run_info['traits_min_pval'] = traits.loc[:, 'Fisher\'s exact test'].min()
run_info['traits_max_f1_score'] = traits.loc[:, 'F1 score'].max()
# Same as above, but only for 'genotype traits'
run_info['traits_genotype_mean_f1_score'] = traits.loc[genotype_rows, 'F1 score'].mean()
run_info['traits_genotype_mean_log10_pval'] = np.log10(traits.loc[genotype_rows, 'Fisher\'s exact test']).mean()
run_info['traits_genotype_min_pval'] = traits.loc[genotype_rows, 'Fisher\'s exact test'].min()
run_info['traits_genotype_max_f1_score'] = traits.loc[genotype_rows, 'F1 score'].max()
# Same as above, but only for 'tissue traits'
run_info['traits_tissue_mean_f1_score'] = traits.loc[tissue_rows, 'F1 score'].mean()
run_info['traits_tissue_mean_log10_pval'] = np.log10(traits.loc[tissue_rows, 'Fisher\'s exact test']).mean()
run_info['traits_tissue_min_pval'] = traits.loc[tissue_rows, 'Fisher\'s exact test'].min()
run_info['traits_tissue_max_f1_score'] = traits.loc[tissue_rows, 'F1 score'].max()
# Proportion of traits which have a factor significant for them, with threshold 0.01 and 0.05 resp.
run_info['traits_sig_traits 0.01'] = (traits.loc[:, 'Fisher\'s exact test'] < 0.01).sum() / len(traits)
run_info['traits_sig_traits 0.05'] = (traits.loc[:, 'Fisher\'s exact test'] < 0.05).sum() / len(traits)
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_traits_fisherpvals_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'traits_fisherpvals{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"traits_fisherpvals{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/traits_fisherpvals(.*).tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
traits_pvals = pd.read_csv(str(file), header=0, index_col=0, sep="\t")
min_pvals_per_factor = traits_pvals.min(axis=0)
# For each threshold, the proportion of factors that are enriched for at least one trait
for threshold in [1, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001]:
run_info[f"traits_factors_alpha {threshold}"] = (min_pvals_per_factor < threshold).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_traits_f1scores_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'traits_f1scores{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"traits_f1scores{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/traits_f1scores(.*).tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
traits_f1scores = pd.read_csv(str(file), header=0, index_col=0, sep="\t")
# Mean (over factors) of the best F1 score that factor attains (across all traits)
run_info['traits_factors_mean_max_f1_score'] = traits_f1scores.max(axis=0).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_ko_enrichment_summary_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'ko_enrichment_summary{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"ko_enrichment_summary{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/ko_enrichment_summary(.*).tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
ko_enrichment = pd.read_csv(str(file), sep="\t", header=0)
# Mean (over traits - only knockout genes) of the best F1 score obtained by any factor on that trait,
# also minimum pvalue
run_info['ko_traits_mean_f1_score'] = ko_enrichment['f1_score (trait)'].mean()
run_info['ko_traits_mean_min_pval'] = np.log10(ko_enrichment['min_pval']).mean()
# For the threshold 0.05, the mean of precision and recall, considering the set of pathways
# significantly enriched at that threshold as the set of predictions, and the set
# of pathways that contained the gene knocked out as successes
run_info['ko_traits_mean_precision_0.05'] = (ko_enrichment['alpha 0.05'] / ko_enrichment['all_pathways alpha 0.05']).mean()
run_info['ko_traits_mean_recall_0.05'] = (ko_enrichment['alpha 0.05'] / ko_enrichment['pathways']).mean()
for alpha_col in ko_enrichment.columns[ko_enrichment.columns.str.startswith('alpha')]:
# Mean recall, as above but for different thresholds
run_info[f"ko_traits_mean_recall_{alpha_col}"] = (ko_enrichment[alpha_col] / ko_enrichment['pathways']).mean()
# Proportion of traits (only ko genotype traits) that had at least one relevant pathway
# (i.e. one containing this knocked out gene) significant at this threshold
run_info[f"ko_traits_nz_{alpha_col}"] = (ko_enrichment[alpha_col] != 0).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_factor_info_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'factor_info{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"factor_info{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/factor_info(.*).tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
factor_info = pd.read_csv(str(file), sep="\t", index_col=0, header=0)
# Number of factors, mean number of genes and samples in factor,
# mean of genes*samples (over factors), which I'm calling number of cells
run_info['recovered_K'] = factor_info.shape[0]
run_info['mean_num_genes'] = factor_info['num_genes'].mean()
run_info['mean_num_samples'] = factor_info['num_samples'].mean()
run_info['mean_num_cells'] = (factor_info['num_samples'] * factor_info['num_genes']).mean()
# Mean (over factors) of the maximum (over other factors) Jaccard similarity
run_info['mean_redundancy_max'] = factor_info['redundancy_max'].mean()
# Mean (over factors) of the mean (over other factors) Jaccard similarity
run_info['mean_redundancy_mean'] = factor_info['redundancy_mean'].mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def get_number_unique_pathways_mdr(method_dataset_run_id, enrich_thresholds=[0.001, 0.01, 0.05]):
if 'Plaid' in method_dataset_run_id:
thresh = "0e+0"
else:
thresh = "1e-2"
pathway_pvals = pd.read_csv(f"analysis/IMPC/{method_dataset_run_id}/pathways_fisherpvals_thresh_{thresh}.tsv",
sep='\t',
index_col=0)
main_pathways = (pathway_pvals.values.argmin(axis=0))
results = {'method_dataset_run_id': method_dataset_run_id}
results["unique_best_pathways"] = len(set(main_pathways))
for threshold in enrich_thresholds:
results[f"pathways_{threshold}"] = sum(pathway_pvals.min(axis=1) < threshold)
return results
def get_number_unique_pathways(error_df_file):
error_df = pd.read_csv(error_df_file)
pathways_dicts = []
for mdr in error_df[error_df['run_complete']]['method_dataset_run_id'].unique():
try:
results = get_number_unique_pathways_mdr(mdr)
pathways_dicts.append(results)
except FileNotFoundError:
logging.warn(f"Skipping mdr {mdr}")
continue
return pd.DataFrame(pathways_dicts)
| import logging
from pathlib import Path
import re
import scipy.stats as ss
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.stats.multitest as multitest
import sklearn.metrics
from intermine.webservice import Service
import biclust_comp.utils as utils
def plot_sample_enrichment_impc(X_file, max_factors=None, max_traits=None):
sample_info = read_sample_info_IMPC("data/real/IMPC/sample_info.txt")
X = utils.read_matrix_tsv(X_file)
trait_dummies = pd.get_dummies(sample_info[['tissue', 'genotype']])
return plot_enrichment(trait_dummies, X, max_factors, max_traits)
def plot_pathway_enrichment(B_file, gene_ensembl_ids_file,
full_pathways_file="analysis/IMPC/full_pathways.tsv",
max_factors=None, max_pathways=None):
with open(gene_ensembl_ids_file) as f:
gene_ensembl_ids = [line.strip() for line in f.readlines()]
B = pd.read_csv(B_file, sep="\t")
full_pathways_df = pd.read_csv(full_pathways_file, sep="\t")
pathways_df = construct_pathways_df(gene_ensembl_ids, full_pathways_df)
return plot_enrichment(pathways_df, B, max_factors, max_pathways)
def construct_ko_pathways_df():
sample_info = read_sample_info_IMPC("data/real/IMPC/sample_info.txt")
service = Service("http://www.mousemine.org/mousemine/service")
knocked_out_genes = []
for genotype in sample_info.genotype.unique():
match = re.match(r"(.*) knockout", genotype)
if match:
knocked_out_genes.append(match[1])
ko_genes_pathways = {}
pathway_names_dict = {}
for knocked_out_gene in knocked_out_genes:
query = service.new_query("ProteinCodingGene")
query.add_view("pathways.identifier", "pathways.name", "symbol")
query.add_constraint("symbol", "=", knocked_out_gene)
pathways = [f"{row['pathways.name']}_-_{row['pathways.identifier']}" for row in query.rows()]
ko_genes_pathways[knocked_out_gene] = pathways
for row in query.rows():
pathway_names_dict[row["pathways.identifier"]] = row["pathways.name"]
ko_genes_pathways_df = utils.transform_dict_to_count_df(ko_genes_pathways)
return ko_genes_pathways_df, pathway_names_dict
def construct_full_pathways_df(pathways):
service = Service("http://www.mousemine.org/mousemine/service")
pathways_dict = {}
for pathway in pathways:
query = service.new_query("Pathway")
query.add_view(
"genes.primaryIdentifier", "genes.symbol", "genes.name",
"genes.sequenceOntologyTerm.name", "genes.chromosome.primaryIdentifier"
)
query.add_constraint("identifier", "=", pathway)
pathways_dict[pathway] = [row["genes.primaryIdentifier"]
for row in query.rows()]
pathways_df = utils.transform_dict_to_count_df(pathways_dict).T
return pathways_df
def construct_pathways_df(gene_ensembl_ids, full_pathways_df,
ensembl_to_mgi_file="analysis/mart_export.txt"):
ensembl_to_mgi = pd.read_csv(ensembl_to_mgi_file,
sep="\t",
index_col=0)
pathways_df = pd.DataFrame(index=gene_ensembl_ids,
columns=full_pathways_df.columns,
dtype=int,
data=0)
for ensembl_id in gene_ensembl_ids:
unversioned_id = ensembl_id.split('.')[0]
try:
mgi_id = ensembl_to_mgi.loc[unversioned_id, 'MGI ID']
if isinstance(mgi_id, str) and mgi_id.startswith('MGI'):
pass
else:
raise KeyError
        except KeyError as e:
            print(f"Unable to translate ID {ensembl_id}")
            continue
try:
pathways_df.loc[ensembl_id, :] = full_pathways_df.loc[mgi_id, :]
except KeyError as e:
print(f"MGI ID not found in pathways matrix {mgi_id}")
return pathways_df
def plot_enrichment(trait_df, factor_df, max_factors, max_traits):
    f1_scores, intersections, _fisher_pvals, _odds_ratios = calculate_trait_enrichment(factor_df, trait_df)
if max_factors:
num_factors = min(factor_df.shape[1], max_factors)
else:
num_factors = factor_df.shape[1]
if max_traits:
num_traits = min(trait_df.shape[1], max_traits)
else:
num_traits = trait_df.shape[1]
# Sort the columns and rows by maximum f1 score, so that the factors with
# best enrichment will be left-most in the chart, and traits with best
# enrichment will be highest in the chart
ordered_columns = sorted(list(f1_scores.columns),
key=lambda k: f1_scores.iloc[:, k].max(),
reverse=True)
ordered_rows = sorted(list(f1_scores.index),
key=lambda row: f1_scores.loc[row, :].max(),
reverse=True)
intersections.loc['total', :] = (factor_df != 0).sum()
f1_scores.loc['total', :] = 0
ordered_rows.insert(0, 'total')
ordered_intersections = intersections.loc[ordered_rows, ordered_columns]
ordered_intersections.insert(0, 'total', trait_df.sum())
ordered_f1_scores = f1_scores.loc[ordered_rows, ordered_columns]
ordered_f1_scores.insert(0, 'total', 0)
fig, ax = plt.subplots(figsize=(num_factors * 0.7 + 3,
num_traits * 0.7))
# Colour each square by the F1 score
plt.imshow(ordered_f1_scores.iloc[:num_traits + 1, :num_factors + 1],
aspect='auto',
cmap='Blues')
# Sort out axis labels
ax.set_yticks(np.arange(num_traits + 1))
ax.set_xticks(np.arange(num_factors + 1))
ax.set_yticklabels(ordered_f1_scores.index)
ax.set_xticklabels(ordered_f1_scores.columns)
# Add text that notes the number of samples in intersection of trait and factor
threshold_black = 0.5
for j in range(num_factors + 1):
for i in range(num_traits + 1):
value = ordered_intersections.iloc[i, j]
opacity = ordered_f1_scores.iloc[i, j]
if opacity < threshold_black and value != 0:
color="black"
else:
color="white"
text = ax.text(j, i, value,
ha="center", va="center", color=color)
plt.axvline(x=0.5, color='black')
plt.axhline(y=0.5, color='black')
plt.colorbar()
fig.tight_layout()
plt.show()
return ordered_f1_scores, ordered_intersections
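# Example usage of plot_enrichment (hypothetical dataframes): both inputs are
# indexed by sample, trait_df with one binary column per trait and factor_df
# with one column per factor, where non-zero entries mark factor membership.
# Passing None for either limit plots all factors or traits.
#
#     ordered_f1, ordered_counts = plot_enrichment(trait_df, factor_df,
#                                                  max_factors=20, max_traits=30)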
def calculate_trait_enrichment(factor_df, trait_df):
f1_scores = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
fisher_pvals = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
odds_ratios = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=float)
intersections = pd.DataFrame(index=trait_df.columns,
columns=factor_df.columns,
dtype=int)
for trait_name, trait_column in trait_df.items():
for factor_index, factor_column in factor_df.items():
            total_population = len(trait_column)
            factor_size = (factor_column != 0).sum()
            trait_non_zero = np.where(trait_column)[0]
            intersection_size = ((factor_column.iloc[trait_non_zero]) != 0).sum()
            trait_size = trait_column.sum()
intersections.loc[trait_name, factor_index] = intersection_size
f1_scores.loc[trait_name, factor_index] = sklearn.metrics.f1_score(trait_column,
factor_column != 0)
            # sf is the 'survival' function, i.e. 1 - cdf.
            # So we are finding the probability that the intersection size is at
            # least as large as the one we observed, under the assumption that it
            # follows a hypergeometric distribution with M=total_population,
            # n=trait_size and N=factor_size, where M is the total number of
            # objects in the bin, n is the number of objects which are successes,
            # N is the number of objects we pick, and the observed count is the
            # number of picked objects which are successes.
fisher_pvals.loc[trait_name, factor_index] = ss.hypergeom.sf(intersection_size - 1,
total_population,
trait_size,
factor_size)
odds_in_factor = intersection_size / (factor_size - intersection_size)
notfactor_nottrait = total_population - trait_size - factor_size + intersection_size
odds_out_of_factor = (trait_size - intersection_size) / notfactor_nottrait
odds_ratios.loc[trait_name, factor_index] = odds_in_factor / odds_out_of_factor
_reject, corrected_fisher_pvals = utils.correct_multiple_testing(fisher_pvals)
return f1_scores, intersections, corrected_fisher_pvals, odds_ratios
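# A small self-contained sketch of the hypergeometric test used above, with
# made-up numbers: in a population of 100 samples where 20 carry a trait and a
# factor covers 30 samples, the expected overlap is 6, so an overlap of 12 or
# more is unlikely by chance. Assumes scipy.stats is imported as ss, as in the
# calls above.
def _hypergeom_enrichment_example():
    total_population, trait_size, factor_size = 100, 20, 30
    intersection_size = 12
    # sf(k - 1) gives P(X >= k) for X ~ hypergeom(M, n, N)
    return ss.hypergeom.sf(intersection_size - 1, total_population,
                           trait_size, factor_size)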
def summarise_enrichment(sort_measure_name, measures_dict, factor_df, trait_df):
trait_enrichment_dicts = []
sort_measure_df = measures_dict[sort_measure_name]
for trait in sort_measure_df.index:
        best_factor = sort_measure_df.loc[trait, :].idxmax()
trait_enrichment_dict = {'trait': trait,
'best factor (by F1 score)': best_factor,
'factor size': (factor_df.loc[:, best_factor] != 0).sum(),
'trait size': (trait_df.loc[:, trait] != 0).sum()}
for measure, measure_df in measures_dict.items():
trait_enrichment_dict[measure] = measure_df.loc[trait, best_factor]
trait_enrichment_dicts.append(trait_enrichment_dict)
return pd.DataFrame(trait_enrichment_dicts)
def read_sample_info_IMPC(filename, read_ID=False):
sample_info = pd.read_csv(filename, sep="\t")
sample_info['genotype'] = sample_info['Factor Value[genotype]']
sample_info['tissue'] = sample_info['Factor Value[organism part]']
if read_ID:
sample_info['ID'] = sample_info['Comment[ENA_SAMPLE]']
return sample_info
def summarise_pathways_summary_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'pathways_summary{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"pathways_summary{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
    file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/pathways_summary(.*)\.tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
pathways = pd.read_csv(str(file), sep="\t", header=0)
# Mean (over factors) of log10 of the smallest p-value
run_info['factors_pathways_mean_min_pval'] = np.log10(pathways['min_pval']).mean()
for alpha_col in pathways.columns[pathways.columns.str.startswith('alpha')]:
# For each threshold, the mean (over factors) number of pathways significant at that threshold and
# the proportion of factors that had at least one pathway significant at that threshold
run_info[f"factors_pathways_mean_{alpha_col}"] = pathways[alpha_col].mean()
run_info[f"factors_pathways_nz_{alpha_col}"] = (pathways[alpha_col] != 0).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
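# Quick check of the path-decoding pattern above on a made-up path; the four
# groups are method, dataset, run_id and the postprocessing suffix:
#
#     re.match(file_pattern,
#              "analysis/IMPC/nmf/real/IMPC/liver/run_1/pathways_summary_qc.tsv")
#     # -> groups ('nmf', 'liver', 'run_1', '_qc')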
def summarise_traits_summary_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'traits_summary{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"traits_summary{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
    file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/traits_summary(.*)\.tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
traits = pd.read_csv(str(file), sep="\t", header=0)
tissue_rows = traits['trait'].str.startswith('tissue')
genotype_rows = traits['trait'].str.startswith('genotype')
# Mean (over traits) of f1 score from best factor, mean (over traits) of log of Fisher exact p-value
# (again from best factor), min p-value (min over traits, of p-value from best factor), max (over traits)
# of f1 score from best factor
run_info['traits_mean_f1_score'] = traits.loc[:, 'F1 score'].mean()
run_info['traits_mean_log10_pval'] = np.log10(traits.loc[:, 'Fisher\'s exact test']).mean()
run_info['traits_min_pval'] = traits.loc[:, 'Fisher\'s exact test'].min()
run_info['traits_max_f1_score'] = traits.loc[:, 'F1 score'].max()
# Same as above, but only for 'genotype traits'
run_info['traits_genotype_mean_f1_score'] = traits.loc[genotype_rows, 'F1 score'].mean()
run_info['traits_genotype_mean_log10_pval'] = np.log10(traits.loc[genotype_rows, 'Fisher\'s exact test']).mean()
run_info['traits_genotype_min_pval'] = traits.loc[genotype_rows, 'Fisher\'s exact test'].min()
run_info['traits_genotype_max_f1_score'] = traits.loc[genotype_rows, 'F1 score'].max()
# Same as above, but only for 'tissue traits'
run_info['traits_tissue_mean_f1_score'] = traits.loc[tissue_rows, 'F1 score'].mean()
run_info['traits_tissue_mean_log10_pval'] = np.log10(traits.loc[tissue_rows, 'Fisher\'s exact test']).mean()
run_info['traits_tissue_min_pval'] = traits.loc[tissue_rows, 'Fisher\'s exact test'].min()
run_info['traits_tissue_max_f1_score'] = traits.loc[tissue_rows, 'F1 score'].max()
# Proportion of traits which have a factor significant for them, with threshold 0.01 and 0.05 resp.
run_info['traits_sig_traits 0.01'] = (traits.loc[:, 'Fisher\'s exact test'] < 0.01).sum() / len(traits)
run_info['traits_sig_traits 0.05'] = (traits.loc[:, 'Fisher\'s exact test'] < 0.05).sum() / len(traits)
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_traits_fisherpvals_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'traits_fisherpvals{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"traits_fisherpvals{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
    file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/traits_fisherpvals(.*)\.tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
traits_pvals = pd.read_csv(str(file), header=0, index_col=0, sep="\t")
min_pvals_per_factor = traits_pvals.min(axis=0)
# For each threshold, the proportion of factors that are enriched for at least one trait
for threshold in [1, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001]:
run_info[f"traits_factors_alpha {threshold}"] = (min_pvals_per_factor < threshold).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_traits_f1scores_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'traits_f1scores{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"traits_f1scores{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
    file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/traits_f1scores(.*)\.tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
traits_f1scores = pd.read_csv(str(file), header=0, index_col=0, sep="\t")
# Mean (over factors) of the best F1 score that factor attains (across all traits)
run_info['traits_factors_mean_max_f1_score'] = traits_f1scores.max(axis=0).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def summarise_ko_enrichment_summary_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'ko_enrichment_summary{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"ko_enrichment_summary{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
    file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/ko_enrichment_summary(.*)\.tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
ko_enrichment = pd.read_csv(str(file), sep="\t", header=0)
# Mean (over traits - only knockout genes) of the best F1 score obtained by any factor on that trait,
# also minimum pvalue
run_info['ko_traits_mean_f1_score'] = ko_enrichment['f1_score (trait)'].mean()
run_info['ko_traits_mean_min_pval'] = np.log10(ko_enrichment['min_pval']).mean()
# For the threshold 0.05, the mean of precision and recall, considering the set of pathways
# significantly enriched at that threshold as the set of predictions, and the set
# of pathways that contained the gene knocked out as successes
run_info['ko_traits_mean_precision_0.05'] = (ko_enrichment['alpha 0.05'] / ko_enrichment['all_pathways alpha 0.05']).mean()
run_info['ko_traits_mean_recall_0.05'] = (ko_enrichment['alpha 0.05'] / ko_enrichment['pathways']).mean()
for alpha_col in ko_enrichment.columns[ko_enrichment.columns.str.startswith('alpha')]:
# Mean recall, as above but for different thresholds
run_info[f"ko_traits_mean_recall_{alpha_col}"] = (ko_enrichment[alpha_col] / ko_enrichment['pathways']).mean()
# Proportion of traits (only ko genotype traits) that had at least one relevant pathway
# (i.e. one containing this knocked out gene) significant at this threshold
run_info[f"ko_traits_nz_{alpha_col}"] = (ko_enrichment[alpha_col] != 0).mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
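# Toy numbers for the precision/recall summaries above (made up for
# illustration): if a knockout trait has 4 relevant pathways ('pathways'),
# 3 of them significant at 0.05 ('alpha 0.05'), and 6 pathways significant
# overall ('all_pathways alpha 0.05'), then precision = 3/6 and recall = 3/4.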
def summarise_factor_info_IMPC(folder, postprocessing='*'):
logging.info(f"Looking in folder {folder} for files of the form 'factor_info{postprocessing}.tsv'")
files = [str(filename) for filename in Path(folder).rglob(f"factor_info{postprocessing}.tsv")]
logging.info(f"Found {len(files)} files")
    file_pattern = re.compile(r'analysis/IMPC/(\w+)/real/IMPC/([\w/]+)/(run_.+)/factor_info(.*)\.tsv')
run_info_dicts = []
for file in files:
logging.info(f"Processing file {file}")
match = re.match(file_pattern, str(file))
if match:
run_info = {'method': match[1],
'dataset': match[2],
'run_id': match[3],
'postprocessing': match[4]}
try:
factor_info = pd.read_csv(str(file), sep="\t", index_col=0, header=0)
# Number of factors, mean number of genes and samples in factor,
# mean of genes*samples (over factors), which I'm calling number of cells
run_info['recovered_K'] = factor_info.shape[0]
run_info['mean_num_genes'] = factor_info['num_genes'].mean()
run_info['mean_num_samples'] = factor_info['num_samples'].mean()
run_info['mean_num_cells'] = (factor_info['num_samples'] * factor_info['num_genes']).mean()
# Mean (over factors) of the maximum (over other factors) Jaccard similarity
run_info['mean_redundancy_max'] = factor_info['redundancy_max'].mean()
# Mean (over factors) of the mean (over other factors) Jaccard similarity
run_info['mean_redundancy_mean'] = factor_info['redundancy_mean'].mean()
except pd.errors.EmptyDataError as e:
logging.warning(f"Empty file: {file}")
except KeyError as e:
logging.warning(f"Required columns not found: {file}")
run_info_dicts.append(run_info)
else:
logging.warning(f"Failed to decode file name: {file}")
return pd.DataFrame(run_info_dicts)
def get_number_unique_pathways_mdr(method_dataset_run_id, enrich_thresholds=[0.001, 0.01, 0.05]):
if 'Plaid' in method_dataset_run_id:
thresh = "0e+0"
else:
thresh = "1e-2"
pathway_pvals = pd.read_csv(f"analysis/IMPC/{method_dataset_run_id}/pathways_fisherpvals_thresh_{thresh}.tsv",
sep='\t',
index_col=0)
main_pathways = (pathway_pvals.values.argmin(axis=0))
results = {'method_dataset_run_id': method_dataset_run_id}
results["unique_best_pathways"] = len(set(main_pathways))
for threshold in enrich_thresholds:
results[f"pathways_{threshold}"] = sum(pathway_pvals.min(axis=1) < threshold)
return results
def get_number_unique_pathways(error_df_file):
error_df = pd.read_csv(error_df_file)
pathways_dicts = []
for mdr in error_df[error_df['run_complete']]['method_dataset_run_id'].unique():
try:
results = get_number_unique_pathways_mdr(mdr)
pathways_dicts.append(results)
except FileNotFoundError:
            logging.warning(f"Skipping mdr {mdr}")
continue
return pd.DataFrame(pathways_dicts)
|
from src.dataToCode.languages.classToCode import ClassToCode
from src.dataToCode.dataClasses.classData import ClassData
from src.dataToCode.dataClasses.modifier import Modifier
from src.dataToCode.languages.ToJava.methodToJava import MethodToJava
from src.dataToCode.languages.ToJava.interfaceToJava import InterfaceToJava
from src.dataToCode.languages.ToJava.inheritanceToJava import InheritanceToJava
class ClassToJava(ClassToCode):
def __init__(self, class_data: ClassData):
self.class_data = class_data
self.method_to_code = MethodToJava(self.class_data.methods, False)
self.inheritance_to_code = InheritanceToJava(self.class_data.inheritances)
def convert(self) -> str:
return (f"import java.util.*;\n\n{self.__formatted_class_header()}\n"
f"{self.__formatted_fields()}"
f"{self.method_to_code.get_formatted_methods()}\n"
f"}}")
def __formatted_class_header(self):
return (f"{self.class_data.visibility.name} {self.class_data.modifier.value}"
f"{"" if self.class_data.modifier is Modifier.none else " "}"
f"class {self.class_data.name}{self.inheritance_to_code.get_formatted()}"
f"{InterfaceToJava.codeImplementedInterfaces(self.class_data.implementations)}"
f" {{\n")
def __formatted_fields(self):
if len(self.class_data.fields) > 0:
class_fields = [f"\t{fields.visibility.value} {fields.type_} {fields.name};"
for fields in self.class_data.fields]
return '\n'.join(class_fields) + \
("\n\n" if self.class_data.methods else "")
else:
return ""
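# Hypothetical usage sketch: given a populated ClassData instance (its
# constructor is defined elsewhere in this repo), conversion to Java source is
# a single call:
#
#     java_source = ClassToJava(class_data).convert()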
| from src.dataToCode.languages.classToCode import ClassToCode
from src.dataToCode.dataClasses.classData import ClassData
from src.dataToCode.dataClasses.modifier import Modifier
from src.dataToCode.languages.ToJava.methodToJava import MethodToJava
from src.dataToCode.languages.ToJava.interfaceToJava import InterfaceToJava
from src.dataToCode.languages.ToJava.inheritanceToJava import InheritanceToJava
class ClassToJava(ClassToCode):
def __init__(self, class_data: ClassData):
self.class_data = class_data
self.method_to_code = MethodToJava(self.class_data.methods, False)
self.inheritance_to_code = InheritanceToJava(self.class_data.inheritances)
def convert(self) -> str:
return (f"import java.util.*;\n\n{self.__formatted_class_header()}\n"
f"{self.__formatted_fields()}"
f"{self.method_to_code.get_formatted_methods()}\n"
f"}}")
def __formatted_class_header(self):
return (f"{self.class_data.visibility.name} {self.class_data.modifier.value}"
f"{'' if self.class_data.modifier is Modifier.none else ' '}"
f"class {self.class_data.name}{self.inheritance_to_code.get_formatted()}"
f"{InterfaceToJava.codeImplementedInterfaces(self.class_data.implementations)}"
f" {{\n")
def __formatted_fields(self):
if len(self.class_data.fields) > 0:
class_fields = [f"\t{fields.visibility.value} {fields.type_} {fields.name};"
for fields in self.class_data.fields]
return '\n'.join(class_fields) + \
("\n\n" if self.class_data.methods else "")
else:
return ""
|
import io
import uuid
import pytz
import json
import logging
import pandas as pd
from constance import config
from django.db import models
from django.contrib.postgres.fields import ArrayField, JSONField
from django.core.validators import (
int_list_validator,
MinValueValidator,
)
from django.db.models.signals import pre_delete
from datetime import datetime
from backend.apps.core import clients
logger = logging.getLogger(__name__)
NOT_ATTRIBUTES_KEYS_SUBSCRIPTION = ["id", "type", "TimeInstant"]
class DataModel(models.Model):
"""Class which holds everything related to a Blackbox Anomaly Detection model."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=128, help_text="Model name")
is_training = models.BooleanField(
        help_text="Whether the model is being trained or not", default=False,
)
trained = models.BooleanField(
        help_text="Whether the model is trained or not", default=False
)
deployed = models.BooleanField(
        help_text="Whether the model is deployed or not", default=False
)
date_trained = models.DateTimeField(
help_text="Date the model was trained", default=None, blank=True, null=True
)
date_deployed = models.DateTimeField(
help_text="Date the model was deployed", default=None, blank=True, null=True
)
num_predictions = models.IntegerField(
help_text="Number of predictions made by this model", default=0
)
task_status = models.CharField(
        help_text="URL to get the progress of the training process",
null=True,
blank=True,
max_length=512,
)
# sensors
plcs = JSONField()
contamination = models.FloatField(
help_text="Contamination fraction in the training dataset",
default=0.1,
validators=[MinValueValidator(0.0)],
null=True,
blank=True,
)
scaler = models.CharField(
help_text="The scaler used to scale the data before training and predicting",
default="minmax",
max_length=48,
null=True,
blank=True,
)
# PCA Mahalanobis
pca_mahalanobis = models.BooleanField(null=True, blank=True, default=False)
n_components = models.IntegerField(
        help_text="Number of components for the PCA algorithm",
default=2,
validators=[MinValueValidator(1)],
null=True,
blank=True,
)
# Autoencoder
autoencoder = models.BooleanField(null=True, blank=True, default=False)
hidden_neurons = models.CharField(
help_text="Neural Network layers and the number of neurons in each layer",
validators=[
int_list_validator(
sep=",",
                message="It should be a string with a list of integers separated by a comma",
allow_negative=False,
)
],
default="32,16,16,32",
max_length=128,
null=True,
blank=True,
)
dropout_rate = models.FloatField(
help_text="Dropout rate across all the layers of the Neural Network",
default=0.2,
null=True,
blank=True,
)
activation = models.CharField(
help_text="Layers activation function of Neural Network",
choices=[
("elu", "elu"),
("softmax", "softmax"),
("selu", "selu"),
("softplus", "softplus"),
("softsign", "softsign"),
("relu", "relu"),
("tanh", "tanh"),
("sigmoid", "sigmoid"),
("hard_sigmoid", "hard_sigmoid"),
("exponential", "exponential"),
],
default="elu",
max_length=24,
null=True,
blank=True,
)
kernel_initializer = models.CharField(
help_text="Layers kernel initializer of Neural Network",
choices=[
("Zeros", "Zeros"),
("Ones", "Ones"),
("Constant", "Constant"),
("RandomNormal", "RandomNormal"),
("RandomUniform", "RandomUniform"),
("TruncatedNormal", "TruncatedNormal"),
("VarianceScaling", "VarianceScaling"),
("Orthogonal", "Orthogonal"),
("Identity", "Identity"),
("lecun_uniform", "lecun_uniform"),
("glorot_normal", "glorot_normal"),
("glorot_uniform", "glorot_uniform"),
("he_normal", "he_normal"),
("lecun_normal", "lecun_normal"),
("he_uniform", "he_uniform"),
],
default="glorot_uniform",
max_length=24,
null=True,
blank=True,
)
loss_function = models.CharField(
help_text="Loss function of the Neural Network",
default="mse",
max_length=24,
null=True,
blank=True,
)
optimizer = models.CharField(
help_text="Optimizer of Neural Network",
choices=[
("sgd", "sgd"),
("rmsprop", "rmsprop"),
("adagrad", "adagrad"),
("adadelta", "adadelta"),
("adam", "adam"),
("adamax", "adamax"),
("nadam", "nadam"),
],
default="adam",
max_length=24,
null=True,
blank=True,
)
epochs = models.IntegerField(
        help_text="Number of times that all the batches will be processed in the "
        "Neural Network",
default=100,
null=True,
blank=True,
)
batch_size = models.IntegerField(
help_text="Batch size", default=32, null=True, blank=True
)
validation_split = models.FloatField(
        help_text="Percentage of the training data that will be used for validation"
        " purposes in the Neural Network",
default=0.05,
null=True,
blank=True,
)
early_stopping = models.BooleanField(
help_text="Stops the training process in the Neural Network when it's not"
" getting any improvement",
default=False,
null=True,
blank=True,
)
# K-Means
kmeans = models.BooleanField(null=True, blank=True, default=False)
n_clusters = models.IntegerField(
help_text="Number of clusters for the K-Means algorithm",
default=None,
null=True,
blank=True,
)
max_cluster_elbow = models.IntegerField(
        help_text="Maximum number of clusters to test in the Elbow Method",
default=100,
null=True,
blank=True,
)
# One Class SVM
ocsvm = models.BooleanField(null=True, blank=True, default=False)
kernel = models.CharField(
help_text="Kernel type for One Class SVM",
choices=[
("linear", "linear"),
("poly", "poly"),
("rbf", "rbf"),
("sigmoid", "sigmoid"),
("precomputed", "precomputed"),
],
default="rbf",
max_length=24,
null=True,
blank=True,
)
degree = models.IntegerField(
        help_text="Degree of the polynomial kernel function for One Class SVM",
default=3,
null=True,
blank=True,
)
gamma = models.CharField(
        help_text="Kernel coefficient for 'rbf', 'poly' and 'sigmoid' in One Class SVM."
        " It can be 'scale', 'auto' or a float",
default="scale",
max_length=24,
null=True,
blank=True,
)
coef0 = models.FloatField(
help_text="Independent term in kernel function for One Class SVM. Only "
"significant in 'poly'",
default=0.0,
null=True,
blank=True,
)
tol = models.FloatField(
help_text="Tolerance for stopping criterion for One Class SVM",
default=0.001,
null=True,
blank=True,
)
shrinking = models.BooleanField(
help_text="Whether to use the shrinking heuristic for One Class SVM",
default=True,
null=True,
blank=True,
)
cache_size = models.IntegerField(
help_text="Specify the size of the kernel cache in MB for One Class SVM",
default=200,
null=True,
blank=True,
)
# Gaussian Distribution
gaussian_distribution = models.BooleanField(null=True, blank=True, default=False)
epsilon_candidates = models.IntegerField(
help_text="Number of epsilon values that will be tested to find the best one",
default=100000000,
null=True,
blank=True,
)
# Isolation Forest
isolation_forest = models.BooleanField(null=True, blank=True, default=False)
n_estimators = models.IntegerField(
help_text="The number of base estimators in the ensemble for Isolation "
"Forest",
default=100,
null=True,
blank=True,
)
max_features = models.FloatField(
help_text="Number of features to draw from X to train each base estimator"
" for Isolation Forest",
default=1.0,
null=True,
blank=True,
)
bootstrap = models.BooleanField(
        help_text="Indicates if the Bootstrap technique is going to be applied "
        "for Isolation Forest",
default=False,
null=True,
blank=True,
)
# Local Outlier Factor
lof = models.BooleanField(null=True, blank=True, default=False)
n_neighbors_lof = models.IntegerField(
help_text="Number of neighbors to use in LOF", default=20, null=True, blank=True
)
algorithm_lof = models.CharField(
help_text="Algorithm used to compute the nearest neighbors in LOF",
choices=[
("ball_tree", "ball_tree"),
("kd_tree", "kd_tree"),
("brute", "brute"),
("auto", "auto"),
],
default="auto",
max_length=24,
null=True,
blank=True,
)
leaf_size_lof = models.IntegerField(
help_text="Leaf size passed to BallTree or KDTree in LOF",
default=30,
null=True,
blank=True,
)
metric_lof = models.CharField(
help_text="The distance metric to use for the tree in LOF",
default="minkowski",
max_length=24,
null=True,
blank=True,
)
p_lof = models.IntegerField(
        help_text="Parameter of the Minkowski metric in LOF",
default=2,
null=True,
blank=True,
)
# K-Nearest Neighbors
knn = models.BooleanField(null=True, blank=True, default=False)
n_neighbors_knn = models.IntegerField(
help_text="Number of neighbors to use in KNN", default=5, null=True, blank=True
)
radius = models.FloatField(
help_text="The range of parameter space to use by default for "
"radius_neighbors",
default=1.0,
null=True,
blank=True,
)
algorithm_knn = models.CharField(
help_text="Algorithm used to compute the nearest neighbors in KNN",
choices=[
("ball_tree", "ball_tree"),
("kd_tree", "kd_tree"),
("brute", "brute"),
("auto", "auto"),
],
default="auto",
max_length=24,
null=True,
blank=True,
)
leaf_size_knn = models.IntegerField(
help_text="Leaf size passed to BallTree or KDTree in KNN",
default=30,
null=True,
blank=True,
)
metric_knn = models.CharField(
help_text="The distance metric to use for the tree in KNN",
default="minkowski",
max_length=24,
null=True,
blank=True,
)
p_knn = models.IntegerField(
        help_text="Parameter of the Minkowski metric in KNN",
default=2,
null=True,
blank=True,
)
score_func = models.CharField(
help_text="The function used to score anomalies in KNN",
choices=[
("max_distance", "max_distance"),
("average", "average"),
("median", "median"),
],
default="max_distance",
max_length=24,
null=True,
blank=True,
)
# orion subscriptions
subscriptions = ArrayField(models.CharField(max_length=128), default=list)
    # data from subscriptions
data_from_subscriptions = JSONField(default=dict)
dates = JSONField(default=dict)
# clients
blackbox_client = clients.BlackboxClient()
crate_client = clients.CrateClient()
orion_client = clients.OrionClient()
def create_blackbox(self):
"""Creates a Blackbox model in the Anomaly Detection API."""
self.blackbox_client.create_blackbox(self)
def get_models_columns(self):
"""Returns a dict containing two lists, one with the columns and the other
with the models
Returns:
dict or None: containing two lists.
"""
data = {"models": [], "columns": []}
if self.pca_mahalanobis:
data["models"].append("pca_mahalanobis")
if self.autoencoder:
data["models"].append("autoencoder")
if self.kmeans:
data["models"].append("kmeans")
if self.ocsvm:
data["models"].append("one_class_svm")
if self.gaussian_distribution:
data["models"].append("gaussian_distribution")
if self.isolation_forest:
data["models"].append("isolation_forest")
if self.lof:
data["models"].append("local_outlier_factor")
if self.knn:
data["models"].append("knearest_neighbors")
for sensors in self.plcs.values():
data["columns"] = data["columns"] + sensors
if data["models"] and data["columns"]:
return data
return None
def train(
self,
with_source: str,
n: int = None,
from_date: str = None,
to_date: str = None,
train_df=None,
) -> bool:
"""Trains the datamodel either with data from Crate or from a CSV
Args:
with_source (:obj:`str`): source of the training data. Valid choices are
'db' or 'csv'.
n (:obj:`int`): the number of rows to take from the database. Defaults to
None.
            from_date (:obj:`str`): date from which the rows have to be taken. Defaults
                to None.
            to_date (:obj:`str`): date until which the rows have to be taken. Defaults to
                None.
train_df (:obj:`pandas.core.frame.DataFrame`): the dataframe to perform the
training of the model. Defaults to None.
Returns:
            bool: whether the process of training has been initiated or not.
"""
if not self.is_training:
if with_source == "db":
df = self.crate_client.get_data_from_plc(
self.plcs, n=n, from_date=from_date, to_date=to_date
)
# train with data from CSV
else:
df = train_df
if df is None:
return False
train_data_json = json.loads(df.to_json(orient="split"))
payload = self.to_json()
payload["columns"] = train_data_json["columns"]
payload["data"] = train_data_json["data"]
self.task_status = self.blackbox_client.train(self.id, payload)
self.is_training = True
self.trained = False
if self.deployed:
self.set_deployed()
self.save()
return True
return False
    def to_json(self):
        """Gets the model in JSON format."""
json_ = {
"contamination": self.contamination,
"scaler": self.scaler,
"n_jobs": -1,
}
if self.pca_mahalanobis:
json_["pca_mahalanobis"] = {"n_components": self.n_components}
if self.autoencoder:
json_["autoencoder"] = {
"hidden_neurons": list(
map(lambda x: int(x), self.hidden_neurons.split(","))
),
"dropout_rate": self.dropout_rate,
"activation": self.activation,
"kernel_initializer": self.kernel_initializer,
"loss_function": self.loss_function,
"optimizer": self.optimizer,
"epochs": self.epochs,
"batch_size": self.batch_size,
"validation_split": self.validation_split,
"early_stopping": self.early_stopping,
}
if self.kmeans:
json_["kmeans"] = {"max_cluster_elbow": self.max_cluster_elbow}
if self.n_clusters:
json_["kmeans"]["n_clusters"] = self.n_clusters
if self.ocsvm:
json_["one_class_svm"] = {
"kernel": self.kernel,
"degree": self.degree,
"gamma": self.gamma,
"coef0": self.coef0,
"tol": self.tol,
"shrinking": self.shrinking,
"cache_size": self.cache_size,
}
if self.gaussian_distribution:
json_["gaussian_distribution"] = {
"epsilon_candidates": self.epsilon_candidates
}
if self.isolation_forest:
json_["isolation_forest"] = {
"n_estimators": self.n_estimators,
"max_features": self.max_features,
"bootstrap": self.bootstrap,
}
if self.knn:
json_["knearest_neighbors"] = {
"n_neighbors": self.n_neighbors_knn,
"radius": self.radius,
"algorithm": self.algorithm_knn,
"leaf_size": self.leaf_size_knn,
"metric": self.metric_knn,
"p": self.p_knn,
"score_func": self.score_func,
}
        if self.lof:
            json_["local_outlier_factor"] = {
                "n_neighbors": self.n_neighbors_lof,
                "algorithm": self.algorithm_lof,
                "leaf_size": self.leaf_size_lof,
                "metric": self.metric_lof,
                "p": self.p_lof,
            }
return json_
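    # Illustration (not part of the API): for a datamodel with only
    # pca_mahalanobis enabled and default settings, to_json() yields a payload
    # shaped like
    #
    #     {"contamination": 0.1, "scaler": "minmax", "n_jobs": -1,
    #      "pca_mahalanobis": {"n_components": 2}}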
def set_trained(self):
"""Sets the datamodel to the trained state."""
logger.info(f"Setting datamodel with id {self.id} to trained!")
self.is_training = False
self.trained = True
self.date_trained = datetime.now(tz=pytz.UTC)
self.save()
def set_deployed(self):
"""Sets the datamodel to the deployed state."""
self.deployed = not self.deployed
if self.deployed:
self.date_deployed = datetime.now(tz=pytz.UTC)
# create subscriptions in OCB
notification_url = (
f"http://{config.SERVER_IP}/api/v1/datamodels/{self.id}/predict/"
)
subscriptions = []
data_from_subscriptions = {}
for (plc, sensors) in self.plcs.items():
subscription = self.orion_client.create_subscription(
url=notification_url, pattern=plc, conditions=sensors, throttling=5
)
subscriptions.append(subscription)
data_from_subscriptions[plc] = {}
self.subscriptions = subscriptions
self.data_from_subscriptions = data_from_subscriptions
else:
self.date_deployed = None
# remove subscriptions in OCB
self.orion_client.delete_subscriptions(self.subscriptions)
self.subscriptions = []
self.save()
    def check_csv_columns(self, file, index_column: str = None) -> tuple:
"""Checks if a CSV has all the columns necessary to train this datamodel.
Args:
file (django.core.files.uploadedfile.TemporaryUploadedFile): training file.
index_column (:obj:`str`): the name of the index column if there is one.
Defaults to None.
Returns:
            tuple: a bool which indicates whether the CSV is valid, and a
                dataframe if the CSV was valid or None otherwise.
"""
if index_column:
df = pd.read_csv(
io.StringIO(file.read().decode("UTF-8")), index_col=index_column
)
else:
df = pd.read_csv(io.StringIO(file.read().decode("UTF-8")))
# get the columns that should be in the csv
columns_that_should_be_in_csv = []
for columns in self.plcs.values():
for column in columns:
columns_that_should_be_in_csv.append(column)
columns_csv = list(df.columns)
if all(
column in columns_csv for column in columns_that_should_be_in_csv
) and all(column in columns_that_should_be_in_csv for column in columns_csv):
return True, df
return False, None
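    # Note: the pair of all(...) checks in check_csv_columns is equivalent to
    # comparing the two column collections as sets:
    #
    #     set(columns_csv) == set(columns_that_should_be_in_csv)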
def _all_data_from_subscriptions_received(self) -> bool:
"""Checks if data from all subscriptions has been received
Returns:
            bool: whether all data has been received.
"""
return all(
[data_sub != {} for data_sub in self.data_from_subscriptions.values()]
)
def _create_prediction_df(self):
"""Creates a dataframe which contains data from Orion subscriptions to make a
prediction.
Returns:
pandas.core.frame.DataFrame: dataframe with data from subscriptions.
"""
dfs = []
data_from_subscriptions = {}
for (plc, data_sub) in self.data_from_subscriptions.items():
df = pd.DataFrame(data=data_sub["rows"], columns=data_sub["columns"])
dfs.append(df)
data_from_subscriptions[plc] = {}
self.data_from_subscriptions = data_from_subscriptions
df = pd.concat(dfs, axis=1)
return df
def set_subscription_data_and_predict(self, data: dict):
"""Sets subscription data and once it has received the data from all the
subscriptions, it sends them to the Anomaly Detection API to generate a new
prediction.
Args:
            data (:obj:`dict`): data from a subscription in OCB entity form.
"""
entity_id = data["id"]
# Get the attributes data of the subscription
sub_data = {"rows": [[]], "columns": []}
for key in data.keys():
if key not in NOT_ATTRIBUTES_KEYS_SUBSCRIPTION:
sub_data["rows"][0].append(data[key]["value"])
sub_data["columns"].append(key)
# save the data from this subscription
if self.data_from_subscriptions[entity_id] == {}:
logger.info(
f"Received data from {entity_id} for datamodel {self.id}. Columns: {sub_data["columns"]}"
)
            # Save the time instant when the sensor values were updated
for column in sub_data["columns"]:
self.dates[column] = data["TimeInstant"]["value"]
self.data_from_subscriptions[entity_id] = sub_data
if self._all_data_from_subscriptions_received():
logger.info(
f"All data received for datamodel {self.id}. Sending to Anomaly Backend..."
)
df = self._create_prediction_df()
payload = json.loads(df.to_json(orient="split"))
prediction = DataModelPrediction(
datamodel=self, data=payload.copy(), dates=self.dates
)
payload["id"] = str(prediction.id)
prediction.task_status = self.blackbox_client.predict(self.id, payload)
prediction.save()
prediction.send_notification()
self.save()
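    # Shape of the expected notification payload (inferred from the access
    # patterns above; names other than id/type/TimeInstant are sensor
    # attributes, and the values shown are made up):
    #
    #     {"id": "plc1", "type": "PLC",
    #      "TimeInstant": {"type": "DateTime", "value": "2020-01-01T00:00:00Z"},
    #      "temperature": {"type": "Number", "value": 21.5}}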
def send_prediction_to_orion(self, predictions: dict):
"""Sends the predictions received from the Anomaly Detection API to the Orion
Context Broker.
Args:
predictions (:obj:`dict`): predictions made by the Anomaly Detection API.
"""
prediction = DataModelPrediction.objects.get(
datamodel=self, id=predictions["id"]
)
logger.debug(f"Prediction is: {prediction}")
entity_id = f"urn:ngsi-ld:AnomalyPrediction:{self.id}"
entity_type = "AnomalyPrediction"
predictions_to_orion = {}
for (key, value) in predictions.items():
predictions_to_orion[key] = value[0]
attrs = {
"name": {"type": "String", "value": self.name},
"entities": {"type": "Object", "value": self.plcs},
"date": {"type": "DateTime", "value": datetime.now().isoformat()},
"predictions": {"type": "Object", "value": predictions_to_orion},
}
self.orion_client.create_entity(entity_id, entity_type, attrs)
self.num_predictions += 1
self.save()
def set_prediction_results(self, data: dict):
"""Set the results of the prediction received by the Anomaly Detection API.
Args:
data (:obj:`dict`): a dictionary containing the predictions and the ID of
the prediction.
"""
prediction = DataModelPrediction.objects.get(pk=data["id"])
prediction.predictions = {
key: value[0] for (key, value) in data.items() if key != "id"
}
prediction.predictions_received_on = datetime.now(tz=pytz.UTC)
prediction.save()
self.num_predictions += 1
self.save()
prediction.send_to_orion()
prediction.send_notification()
def get_task_status(self):
"""Gets the status of a task in the Anomaly Detection API."""
return self.blackbox_client.get_task_status(self.task_status)
def pre_delete_datamodel_handler(sender, instance, **kwargs):
    """Handles the pre delete signal of a `DataModel`, requesting the Anomaly
    Detection API to delete the associated Blackbox model
    Args:
        sender (backend.apps.models.DataModel): the datamodel about to be deleted.
"""
instance.blackbox_client.delete_blackbox(instance)
pre_delete.connect(pre_delete_datamodel_handler, sender=DataModel)
class DataModelPrediction(models.Model):
"""Class which holds data of a prediction made by a `DataModel`."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)
data = JSONField(help_text="The data to be predicted")
dates = JSONField(help_text="When the date to be predicted was created")
predictions = JSONField(help_text="The predictions", default=dict)
task_status = models.CharField(
        help_text="URL to get the progress of the prediction process",
null=True,
blank=True,
max_length=512,
)
ack = models.BooleanField(
        help_text="Whether the prediction has been acknowledged", default=False
)
user_ack = models.CharField(
help_text="The name of the user who acknowledged the prediction",
max_length=128,
blank=True,
null=True,
)
created_on = models.DateTimeField(
help_text="When the prediction was created", auto_now_add=True
)
predictions_received_on = models.DateTimeField(
        help_text="When the predictions were received",
default=None,
null=True,
blank=True,
)
orion_client = clients.OrionClient()
notification_client = clients.NotificationClient()
def send_to_orion(self):
"""Sends the prediction to the Orion Context Broker."""
entity_id = f"urn:ngsi-ld:AnomalyPrediction:{self.id}"
entity_type = "AnomalyPrediction"
attrs = {
"datamodel_id": {"type": "String", "value": str(self.datamodel.id)},
"datamodel_name": {"type": "String", "value": self.datamodel.name},
"data": {
"type": "Object",
"value": {
column: value
for (column, value) in zip(
self.data["columns"], self.data["data"][0]
)
},
},
"dates": {"type": "Object", "value": self.dates},
"predictions": {"type": "Object", "value": self.predictions},
}
self.orion_client.create_entity(entity_id, entity_type, attrs)
def send_notification(self):
"""Sends the prediction to the Notification Backend."""
self.notification_client.send_prediction(self.to_dict(["_state"]))
def to_dict(self, exclude: list = None):
"""Serialize the class into a dict.
Args:
exclude(:obj:`list`): a list of str containing the keys to exclude.
Returns:
dict: the DataModelPrediction data.
"""
to_exclude = exclude
if to_exclude is None:
to_exclude = []
data = {}
for (key, value) in self.__dict__.items():
if key not in to_exclude:
if type(value) is uuid.UUID:
data[key] = str(value)
elif type(value) is datetime:
data[key] = value.isoformat()
else:
data[key] = value
return data
def set_ack(self, user: str):
"""Sets the ACK for the prediction.
Args:
user (:obj:`str`): the user who sent the ACK.
"""
self.ack = True
self.user_ack = user
self.save()
logger.info(f"DataModel Prediction with {self.id} ACKed by {user}.")
class TrainFile(models.Model):
datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)
file = models.FileField(
blank=False,
null=False,
help_text="A CSV training file containing the columns of the DataModel",
)
index_column = models.CharField(max_length=128, blank=True, null=True)
uploaded_at = models.DateTimeField(auto_now_add=True)
class Meta:
get_latest_by = "uploaded_at"
| import io
import uuid
import pytz
import json
import logging
import pandas as pd
from constance import config
from django.db import models
from django.contrib.postgres.fields import ArrayField, JSONField
from django.core.validators import (
int_list_validator,
MinValueValidator,
)
from django.db.models.signals import pre_delete
from datetime import datetime
from backend.apps.core import clients
logger = logging.getLogger(__name__)
NOT_ATTRIBUTES_KEYS_SUBSCRIPTION = ["id", "type", "TimeInstant"]
class DataModel(models.Model):
"""Class which holds everything related to a Blackbox Anomaly Detection model."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=128, help_text="Model name")
is_training = models.BooleanField(
        help_text="Whether the model is being trained or not", default=False,
)
trained = models.BooleanField(
        help_text="Whether the model is trained or not", default=False
)
deployed = models.BooleanField(
        help_text="Whether the model is deployed or not", default=False
)
date_trained = models.DateTimeField(
help_text="Date the model was trained", default=None, blank=True, null=True
)
date_deployed = models.DateTimeField(
help_text="Date the model was deployed", default=None, blank=True, null=True
)
num_predictions = models.IntegerField(
help_text="Number of predictions made by this model", default=0
)
task_status = models.CharField(
        help_text="URL to get the progress of the training process",
null=True,
blank=True,
max_length=512,
)
# sensors
plcs = JSONField()
contamination = models.FloatField(
help_text="Contamination fraction in the training dataset",
default=0.1,
validators=[MinValueValidator(0.0)],
null=True,
blank=True,
)
scaler = models.CharField(
help_text="The scaler used to scale the data before training and predicting",
default="minmax",
max_length=48,
null=True,
blank=True,
)
# PCA Mahalanobis
pca_mahalanobis = models.BooleanField(null=True, blank=True, default=False)
n_components = models.IntegerField(
        help_text="Number of components for the PCA algorithm",
default=2,
validators=[MinValueValidator(1)],
null=True,
blank=True,
)
# Autoencoder
autoencoder = models.BooleanField(null=True, blank=True, default=False)
hidden_neurons = models.CharField(
help_text="Neural Network layers and the number of neurons in each layer",
validators=[
int_list_validator(
sep=",",
                message="It should be a string with a list of integers separated by a comma",
allow_negative=False,
)
],
default="32,16,16,32",
max_length=128,
null=True,
blank=True,
)
dropout_rate = models.FloatField(
help_text="Dropout rate across all the layers of the Neural Network",
default=0.2,
null=True,
blank=True,
)
activation = models.CharField(
help_text="Layers activation function of Neural Network",
choices=[
("elu", "elu"),
("softmax", "softmax"),
("selu", "selu"),
("softplus", "softplus"),
("softsign", "softsign"),
("relu", "relu"),
("tanh", "tanh"),
("sigmoid", "sigmoid"),
("hard_sigmoid", "hard_sigmoid"),
("exponential", "exponential"),
],
default="elu",
max_length=24,
null=True,
blank=True,
)
kernel_initializer = models.CharField(
help_text="Layers kernel initializer of Neural Network",
choices=[
("Zeros", "Zeros"),
("Ones", "Ones"),
("Constant", "Constant"),
("RandomNormal", "RandomNormal"),
("RandomUniform", "RandomUniform"),
("TruncatedNormal", "TruncatedNormal"),
("VarianceScaling", "VarianceScaling"),
("Orthogonal", "Orthogonal"),
("Identity", "Identity"),
("lecun_uniform", "lecun_uniform"),
("glorot_normal", "glorot_normal"),
("glorot_uniform", "glorot_uniform"),
("he_normal", "he_normal"),
("lecun_normal", "lecun_normal"),
("he_uniform", "he_uniform"),
],
default="glorot_uniform",
max_length=24,
null=True,
blank=True,
)
loss_function = models.CharField(
help_text="Loss function of the Neural Network",
default="mse",
max_length=24,
null=True,
blank=True,
)
optimizer = models.CharField(
help_text="Optimizer of Neural Network",
choices=[
("sgd", "sgd"),
("rmsprop", "rmsprop"),
("adagrad", "adagrad"),
("adadelta", "adadelta"),
("adam", "adam"),
("adamax", "adamax"),
("nadam", "nadam"),
],
default="adam",
max_length=24,
null=True,
blank=True,
)
epochs = models.IntegerField(
        help_text="Number of times that all the batches will be processed in the "
        "Neural Network",
default=100,
null=True,
blank=True,
)
batch_size = models.IntegerField(
help_text="Batch size", default=32, null=True, blank=True
)
validation_split = models.FloatField(
        help_text="Percentage of the training data that will be used for validation"
        " purposes in the Neural Network",
default=0.05,
null=True,
blank=True,
)
early_stopping = models.BooleanField(
help_text="Stops the training process in the Neural Network when it's not"
" getting any improvement",
default=False,
null=True,
blank=True,
)
# K-Means
kmeans = models.BooleanField(null=True, blank=True, default=False)
n_clusters = models.IntegerField(
help_text="Number of clusters for the K-Means algorithm",
default=None,
null=True,
blank=True,
)
max_cluster_elbow = models.IntegerField(
        help_text="Maximum number of clusters to test in the Elbow Method",
default=100,
null=True,
blank=True,
)
# One Class SVM
ocsvm = models.BooleanField(null=True, blank=True, default=False)
kernel = models.CharField(
help_text="Kernel type for One Class SVM",
choices=[
("linear", "linear"),
("poly", "poly"),
("rbf", "rbf"),
("sigmoid", "sigmoid"),
("precomputed", "precomputed"),
],
default="rbf",
max_length=24,
null=True,
blank=True,
)
degree = models.IntegerField(
        help_text="Degree of the polynomial kernel function for One Class SVM",
default=3,
null=True,
blank=True,
)
gamma = models.CharField(
        help_text="Kernel coefficient for 'rbf', 'poly' and 'sigmoid' in One Class SVM."
        " It can be 'scale', 'auto' or a float",
default="scale",
max_length=24,
null=True,
blank=True,
)
coef0 = models.FloatField(
help_text="Independent term in kernel function for One Class SVM. Only "
"significant in 'poly'",
default=0.0,
null=True,
blank=True,
)
tol = models.FloatField(
help_text="Tolerance for stopping criterion for One Class SVM",
default=0.001,
null=True,
blank=True,
)
shrinking = models.BooleanField(
help_text="Whether to use the shrinking heuristic for One Class SVM",
default=True,
null=True,
blank=True,
)
cache_size = models.IntegerField(
help_text="Specify the size of the kernel cache in MB for One Class SVM",
default=200,
null=True,
blank=True,
)
# Gaussian Distribution
gaussian_distribution = models.BooleanField(null=True, blank=True, default=False)
epsilon_candidates = models.IntegerField(
help_text="Number of epsilon values that will be tested to find the best one",
default=100000000,
null=True,
blank=True,
)
# Isolation Forest
isolation_forest = models.BooleanField(null=True, blank=True, default=False)
n_estimators = models.IntegerField(
help_text="The number of base estimators in the ensemble for Isolation "
"Forest",
default=100,
null=True,
blank=True,
)
max_features = models.FloatField(
help_text="Number of features to draw from X to train each base estimator"
" for Isolation Forest",
default=1.0,
null=True,
blank=True,
)
bootstrap = models.BooleanField(
        help_text="Indicates if the Bootstrap technique is going to be applied "
        "for Isolation Forest",
default=False,
null=True,
blank=True,
)
# Local Outlier Factor
lof = models.BooleanField(null=True, blank=True, default=False)
n_neighbors_lof = models.IntegerField(
help_text="Number of neighbors to use in LOF", default=20, null=True, blank=True
)
algorithm_lof = models.CharField(
help_text="Algorithm used to compute the nearest neighbors in LOF",
choices=[
("ball_tree", "ball_tree"),
("kd_tree", "kd_tree"),
("brute", "brute"),
("auto", "auto"),
],
default="auto",
max_length=24,
null=True,
blank=True,
)
leaf_size_lof = models.IntegerField(
help_text="Leaf size passed to BallTree or KDTree in LOF",
default=30,
null=True,
blank=True,
)
metric_lof = models.CharField(
help_text="The distance metric to use for the tree in LOF",
default="minkowski",
max_length=24,
null=True,
blank=True,
)
p_lof = models.IntegerField(
        help_text="Parameter of the Minkowski metric in LOF",
default=2,
null=True,
blank=True,
)
# K-Nearest Neighbors
knn = models.BooleanField(null=True, blank=True, default=False)
n_neighbors_knn = models.IntegerField(
help_text="Number of neighbors to use in KNN", default=5, null=True, blank=True
)
radius = models.FloatField(
help_text="The range of parameter space to use by default for "
"radius_neighbors",
default=1.0,
null=True,
blank=True,
)
algorithm_knn = models.CharField(
help_text="Algorithm used to compute the nearest neighbors in KNN",
choices=[
("ball_tree", "ball_tree"),
("kd_tree", "kd_tree"),
("brute", "brute"),
("auto", "auto"),
],
default="auto",
max_length=24,
null=True,
blank=True,
)
leaf_size_knn = models.IntegerField(
help_text="Leaf size passed to BallTree or KDTree in KNN",
default=30,
null=True,
blank=True,
)
metric_knn = models.CharField(
help_text="The distance metric to use for the tree in KNN",
default="minkowski",
max_length=24,
null=True,
blank=True,
)
p_knn = models.IntegerField(
        help_text="Parameter of the Minkowski metric in KNN",
default=2,
null=True,
blank=True,
)
score_func = models.CharField(
help_text="The function used to score anomalies in KNN",
choices=[
("max_distance", "max_distance"),
("average", "average"),
("median", "median"),
],
default="max_distance",
max_length=24,
null=True,
blank=True,
)
# orion subscriptions
subscriptions = ArrayField(models.CharField(max_length=128), default=list)
    # data from subscriptions
data_from_subscriptions = JSONField(default=dict)
dates = JSONField(default=dict)
# clients
blackbox_client = clients.BlackboxClient()
crate_client = clients.CrateClient()
orion_client = clients.OrionClient()
def create_blackbox(self):
"""Creates a Blackbox model in the Anomaly Detection API."""
self.blackbox_client.create_blackbox(self)
def get_models_columns(self):
"""Returns a dict containing two lists, one with the columns and the other
with the models
Returns:
dict or None: containing two lists.
"""
data = {"models": [], "columns": []}
if self.pca_mahalanobis:
data["models"].append("pca_mahalanobis")
if self.autoencoder:
data["models"].append("autoencoder")
if self.kmeans:
data["models"].append("kmeans")
if self.ocsvm:
data["models"].append("one_class_svm")
if self.gaussian_distribution:
data["models"].append("gaussian_distribution")
if self.isolation_forest:
data["models"].append("isolation_forest")
if self.lof:
data["models"].append("local_outlier_factor")
if self.knn:
data["models"].append("knearest_neighbors")
for sensors in self.plcs.values():
data["columns"] = data["columns"] + sensors
if data["models"] and data["columns"]:
return data
return None
def train(
self,
with_source: str,
n: int = None,
from_date: str = None,
to_date: str = None,
train_df=None,
) -> bool:
"""Trains the datamodel either with data from Crate or from a CSV
Args:
with_source (:obj:`str`): source of the training data. Valid choices are
'db' or 'csv'.
n (:obj:`int`): the number of rows to take from the database. Defaults to
None.
            from_date (:obj:`str`): date from which the rows have to be taken. Defaults
                to None.
            to_date (:obj:`str`): date until which the rows have to be taken. Defaults to
                None.
train_df (:obj:`pandas.core.frame.DataFrame`): the dataframe to perform the
training of the model. Defaults to None.
Returns:
            bool: whether the process of training has been initiated or not.
"""
if not self.is_training:
if with_source == "db":
df = self.crate_client.get_data_from_plc(
self.plcs, n=n, from_date=from_date, to_date=to_date
)
# train with data from CSV
else:
df = train_df
if df is None:
return False
train_data_json = json.loads(df.to_json(orient="split"))
payload = self.to_json()
payload["columns"] = train_data_json["columns"]
payload["data"] = train_data_json["data"]
self.task_status = self.blackbox_client.train(self.id, payload)
self.is_training = True
self.trained = False
if self.deployed:
self.set_deployed()
self.save()
return True
return False
    def to_json(self):
        """Gets the model in JSON format."""
json_ = {
"contamination": self.contamination,
"scaler": self.scaler,
"n_jobs": -1,
}
if self.pca_mahalanobis:
json_["pca_mahalanobis"] = {"n_components": self.n_components}
if self.autoencoder:
json_["autoencoder"] = {
"hidden_neurons": list(
map(lambda x: int(x), self.hidden_neurons.split(","))
),
"dropout_rate": self.dropout_rate,
"activation": self.activation,
"kernel_initializer": self.kernel_initializer,
"loss_function": self.loss_function,
"optimizer": self.optimizer,
"epochs": self.epochs,
"batch_size": self.batch_size,
"validation_split": self.validation_split,
"early_stopping": self.early_stopping,
}
if self.kmeans:
json_["kmeans"] = {"max_cluster_elbow": self.max_cluster_elbow}
if self.n_clusters:
json_["kmeans"]["n_clusters"] = self.n_clusters
if self.ocsvm:
json_["one_class_svm"] = {
"kernel": self.kernel,
"degree": self.degree,
"gamma": self.gamma,
"coef0": self.coef0,
"tol": self.tol,
"shrinking": self.shrinking,
"cache_size": self.cache_size,
}
if self.gaussian_distribution:
json_["gaussian_distribution"] = {
"epsilon_candidates": self.epsilon_candidates
}
if self.isolation_forest:
json_["isolation_forest"] = {
"n_estimators": self.n_estimators,
"max_features": self.max_features,
"bootstrap": self.bootstrap,
}
if self.knn:
json_["knearest_neighbors"] = {
"n_neighbors": self.n_neighbors_knn,
"radius": self.radius,
"algorithm": self.algorithm_knn,
"leaf_size": self.leaf_size_knn,
"metric": self.metric_knn,
"p": self.p_knn,
"score_func": self.score_func,
}
        if self.lof:
            json_["local_outlier_factor"] = {
                "n_neighbors": self.n_neighbors_lof,
                "algorithm": self.algorithm_lof,
                "leaf_size": self.leaf_size_lof,
                "metric": self.metric_lof,
                "p": self.p_lof,
            }
return json_
def set_trained(self):
"""Sets the datamodel to the trained state."""
logger.info(f"Setting datamodel with id {self.id} to trained!")
self.is_training = False
self.trained = True
self.date_trained = datetime.now(tz=pytz.UTC)
self.save()
def set_deployed(self):
"""Sets the datamodel to the deployed state."""
self.deployed = not self.deployed
if self.deployed:
self.date_deployed = datetime.now(tz=pytz.UTC)
# create subscriptions in OCB
notification_url = (
f"http://{config.SERVER_IP}/api/v1/datamodels/{self.id}/predict/"
)
subscriptions = []
data_from_subscriptions = {}
for (plc, sensors) in self.plcs.items():
subscription = self.orion_client.create_subscription(
url=notification_url, pattern=plc, conditions=sensors, throttling=5
)
subscriptions.append(subscription)
data_from_subscriptions[plc] = {}
self.subscriptions = subscriptions
self.data_from_subscriptions = data_from_subscriptions
else:
self.date_deployed = None
# remove subscriptions in OCB
self.orion_client.delete_subscriptions(self.subscriptions)
self.subscriptions = []
self.save()
    def check_csv_columns(self, file, index_column: str = None) -> tuple:
"""Checks if a CSV has all the columns necessary to train this datamodel.
Args:
file (django.core.files.uploadedfile.TemporaryUploadedFile): training file.
index_column (:obj:`str`): the name of the index column if there is one.
Defaults to None.
Returns:
            tuple: a bool which indicates whether the CSV is valid, and a
                dataframe if the CSV was valid or None otherwise.
"""
if index_column:
df = pd.read_csv(
io.StringIO(file.read().decode("UTF-8")), index_col=index_column
)
else:
df = pd.read_csv(io.StringIO(file.read().decode("UTF-8")))
# get the columns that should be in the csv
columns_that_should_be_in_csv = []
for columns in self.plcs.values():
for column in columns:
columns_that_should_be_in_csv.append(column)
columns_csv = list(df.columns)
if all(
column in columns_csv for column in columns_that_should_be_in_csv
) and all(column in columns_that_should_be_in_csv for column in columns_csv):
return True, df
return False, None
def _all_data_from_subscriptions_received(self) -> bool:
"""Checks if data from all subscriptions has been received
Returns:
            bool: whether all data has been received.
"""
return all(
[data_sub != {} for data_sub in self.data_from_subscriptions.values()]
)
def _create_prediction_df(self):
"""Creates a dataframe which contains data from Orion subscriptions to make a
prediction.
Returns:
pandas.core.frame.DataFrame: dataframe with data from subscriptions.
"""
dfs = []
data_from_subscriptions = {}
for (plc, data_sub) in self.data_from_subscriptions.items():
df = pd.DataFrame(data=data_sub["rows"], columns=data_sub["columns"])
dfs.append(df)
data_from_subscriptions[plc] = {}
self.data_from_subscriptions = data_from_subscriptions
df = pd.concat(dfs, axis=1)
return df
def set_subscription_data_and_predict(self, data: dict):
"""Sets subscription data and once it has received the data from all the
subscriptions, it sends them to the Anomaly Detection API to generate a new
prediction.
Args:
data (:obj:`dict`): data from a subscription in OCB entity form.
"""
entity_id = data["id"]
# Get the attributes data of the subscription
sub_data = {"rows": [[]], "columns": []}
for key in data.keys():
if key not in NOT_ATTRIBUTES_KEYS_SUBSCRIPTION:
sub_data["rows"][0].append(data[key]["value"])
sub_data["columns"].append(key)
# save the data from this subscription
if self.data_from_subscriptions[entity_id] == {}:
logger.info(
f"Received data from {entity_id} for datamodel {self.id}. Columns: {sub_data['columns']}"
)
# Save the time instant when the sensor values were updated
for column in sub_data["columns"]:
self.dates[column] = data["TimeInstant"]["value"]
self.data_from_subscriptions[entity_id] = sub_data
if self._all_data_from_subscriptions_received():
logger.info(
f"All data received for datamodel {self.id}. Sending to Anomaly Backend..."
)
df = self._create_prediction_df()
payload = json.loads(df.to_json(orient="split"))
prediction = DataModelPrediction(
datamodel=self, data=payload.copy(), dates=self.dates
)
payload["id"] = str(prediction.id)
prediction.task_status = self.blackbox_client.predict(self.id, payload)
prediction.save()
prediction.send_notification()
self.save()
def send_prediction_to_orion(self, predictions: dict):
"""Sends the predictions received from the Anomaly Detection API to the Orion
Context Broker.
Args:
predictions (:obj:`dict`): predictions made by the Anomaly Detection API.
"""
prediction = DataModelPrediction.objects.get(
datamodel=self, id=predictions["id"]
)
logger.debug(f"Prediction is: {prediction}")
entity_id = f"urn:ngsi-ld:AnomalyPrediction:{self.id}"
entity_type = "AnomalyPrediction"
predictions_to_orion = {}
for (key, value) in predictions.items():
predictions_to_orion[key] = value[0]
attrs = {
"name": {"type": "String", "value": self.name},
"entities": {"type": "Object", "value": self.plcs},
"date": {"type": "DateTime", "value": datetime.now().isoformat()},
"predictions": {"type": "Object", "value": predictions_to_orion},
}
self.orion_client.create_entity(entity_id, entity_type, attrs)
self.num_predictions += 1
self.save()
def set_prediction_results(self, data: dict):
"""Set the results of the prediction received by the Anomaly Detection API.
Args:
data (:obj:`dict`): a dictionary containing the predictions and the ID of
the prediction.
"""
prediction = DataModelPrediction.objects.get(pk=data["id"])
prediction.predictions = {
key: value[0] for (key, value) in data.items() if key != "id"
}
prediction.predictions_received_on = datetime.now(tz=pytz.UTC)
prediction.save()
self.num_predictions += 1
self.save()
prediction.send_to_orion()
prediction.send_notification()
def get_task_status(self):
"""Gets the status of a task in the Anomaly Detection API."""
return self.blackbox_client.get_task_status(self.task_status)
def pre_delete_datamodel_handler(sender, instance, **kwargs):
"""Handles the signal post delete of a model `DataModel` requesting Anomaly
Detection to delete a Blackbox model
Args:
sender (backend.apps.models.DataModel): the datamodel just deleted.
"""
instance.blackbox_client.delete_blackbox(instance)
pre_delete.connect(pre_delete_datamodel_handler, sender=DataModel)
class DataModelPrediction(models.Model):
"""Class which holds data of a prediction made by a `DataModel`."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)
data = JSONField(help_text="The data to be predicted")
dates = JSONField(help_text="When the date to be predicted was created")
predictions = JSONField(help_text="The predictions", default=dict)
task_status = models.CharField(
help_text="URL to get the progress of predicting process",
null=True,
blank=True,
max_length=512,
)
ack = models.BooleanField(
help_text="Wether the prediction has been acknowledged", default=False
)
user_ack = models.CharField(
help_text="The name of the user who acknowledged the prediction",
max_length=128,
blank=True,
null=True,
)
created_on = models.DateTimeField(
help_text="When the prediction was created", auto_now_add=True
)
predictions_received_on = models.DateTimeField(
help_text="When the predictions where received",
default=None,
null=True,
blank=True,
)
orion_client = clients.OrionClient()
notification_client = clients.NotificationClient()
def send_to_orion(self):
"""Sends the prediction to the Orion Context Broker."""
entity_id = f"urn:ngsi-ld:AnomalyPrediction:{self.id}"
entity_type = "AnomalyPrediction"
attrs = {
"datamodel_id": {"type": "String", "value": str(self.datamodel.id)},
"datamodel_name": {"type": "String", "value": self.datamodel.name},
"data": {
"type": "Object",
"value": {
column: value
for (column, value) in zip(
self.data["columns"], self.data["data"][0]
)
},
},
"dates": {"type": "Object", "value": self.dates},
"predictions": {"type": "Object", "value": self.predictions},
}
self.orion_client.create_entity(entity_id, entity_type, attrs)
def send_notification(self):
"""Sends the prediction to the Notification Backend."""
self.notification_client.send_prediction(self.to_dict(["_state"]))
def to_dict(self, exclude: list = None):
"""Serialize the class into a dict.
Args:
exclude(:obj:`list`): a list of str containing the keys to exclude.
Returns:
dict: the DataModelPrediction data.
"""
to_exclude = exclude
if to_exclude is None:
to_exclude = []
data = {}
for (key, value) in self.__dict__.items():
if key not in to_exclude:
if type(value) is uuid.UUID:
data[key] = str(value)
elif type(value) is datetime:
data[key] = value.isoformat()
else:
data[key] = value
return data
def set_ack(self, user: str):
"""Sets the ACK for the prediction.
Args:
user (:obj:`str`): the user who sent the ACK.
"""
self.ack = True
self.user_ack = user
self.save()
logger.info(f"DataModel Prediction with {self.id} ACKed by {user}.")
class TrainFile(models.Model):
datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)
file = models.FileField(
blank=False,
null=False,
help_text="A CSV training file containing the columns of the DataModel",
)
index_column = models.CharField(max_length=128, blank=True, null=True)
uploaded_at = models.DateTimeField(auto_now_add=True)
class Meta:
get_latest_by = "uploaded_at"
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Lin To and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PurchaseInvoice(Document):
def validate(self):
self.validate_account_types()
self.validate_item_quantities()
def before_save(self):
self.set_item_entry_values() # Value Per Unit
self.set_item_entry_cost()
self.set_invoice_cost()
def on_submit(self):
self.add_items_to_inventory()
self.add_ledger_entries()
def on_cancel(self):
if self.docstatus == 0:
return
self.remove_items_from_inventory()
self.cancel_ledger_entries()
def validate_account_type(self, account, account_types):
account_doc = frappe.get_doc("Account", account)
isnt_valid = account_doc.account_type not in account_types
if isnt_valid:
frappe.throw(f"{account} is not from {", ".join(account_types)}")
def validate_account_types(self):
self.validate_account_type(self.stock_account, ["Stock"])
self.validate_account_type(self.funds_account, ["Payable"])
def validate_item_quantities(self):
for item_entry in self.items:
if item_entry.quantity <= 0:
frappe.throw(f"{item_entry.item} quantity should be more than 0")
def set_item_entry_values(self):
for item_entry in self.items:
if not item_entry.value:
item_entry.value = frappe.get_doc("Item", item_entry.item).value
def set_item_entry_cost(self):
for item_entry in self.items:
item_entry.cost = item_entry.value * item_entry.quantity
def set_invoice_cost(self):
self.cost = sum([item_entry.cost for item_entry in self.items])
def add_items_to_inventory(self):
for item_entry in self.items:
# Update quantity
if frappe.db.exists("Inventory", item_entry.item):
# Update quantity
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity + item_entry.quantity
inventory_doc.save(ignore_permissions=True)
else:
# Create new entry
inventory_doc = frappe.new_doc(doctype="Inventory")
inventory_doc.item = item_entry.item
inventory_doc.company = self.company
inventory_doc.quantity = item_entry.quantity
inventory_doc.insert(ignore_permissions=True)
def remove_items_from_inventory(self):
for item_entry in self.items:
# Update quantity
if frappe.db.exists("Inventory", item_entry.item):
# Update quantity
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity - item_entry.quantity
inventory_doc.save(ignore_permissions=True)
def get_ledger_entry(
self, account, against_account, credit, debit, is_for_cancel=False
):
return frappe.get_doc(
doctype="GL Entry",
posting_date=self.posting_date,
account=account,
against_account=against_account,
credit=credit,
debit=debit,
voucher_type=f"{"Cancel" if is_for_cancel else ""}Purchase Invoice",
company_name=self.company,
voucher_number=self.name,
)
def add_ledger_entries(self):
# Create Ledger Entries
credit_entry = self.get_ledger_entry(
self.funds_account, self.stock_account, credit=self.cost, debit=0.0
)
debit_entry = self.get_ledger_entry(
self.stock_account, self.seller, credit=0.0, debit=self.cost
)
self.insert_ledger_entries(credit_entry, debit_entry)
def cancel_ledger_entries(self):
credit_entry = self.get_ledger_entry(
self.funds_account,
self.stock_account,
credit=0.0,
debit=self.cost,
is_for_cancel=True,
)
debit_entry = self.get_ledger_entry(
self.stock_account,
self.seller,
credit=self.cost,
debit=0.0,
is_for_cancel=True,
)
self.insert_ledger_entries(credit_entry, debit_entry)
def insert_ledger_entries(self, credit_entry, debit_entry):
# Insert Ledger Entries
for gl_entry in [credit_entry, debit_entry]:
gl_entry.docstatus = 1
gl_entry.insert(ignore_permissions=True, ignore_if_duplicate=True)
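# --- Hedged sketch (plain dicts stand in for frappe documents): the entries
# produced by add_ledger_entries form a balanced double entry, crediting the
# funds account and debiting the stock account by the same invoice cost.
cost = 150.0
credit_entry = {"account": "funds", "credit": cost, "debit": 0.0}
debit_entry = {"account": "stock", "credit": 0.0, "debit": cost}
assert credit_entry["credit"] == debit_entry["debit"] == cost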
| # -*- coding: utf-8 -*-
# Copyright (c) 2021, Lin To and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PurchaseInvoice(Document):
def validate(self):
self.validate_account_types()
self.validate_item_quantities()
def before_save(self):
self.set_item_entry_values() # Value Per Unit
self.set_item_entry_cost()
self.set_invoice_cost()
def on_submit(self):
self.add_items_to_inventory()
self.add_ledger_entries()
def on_cancel(self):
if self.docstatus == 0:
return
self.remove_items_from_inventory()
self.cancel_ledger_entries()
def validate_account_type(self, account, account_types):
account_doc = frappe.get_doc("Account", account)
isnt_valid = account_doc.account_type not in account_types
if isnt_valid:
frappe.throw(f"{account} is not from {', '.join(account_types)}")
def validate_account_types(self):
self.validate_account_type(self.stock_account, ["Stock"])
self.validate_account_type(self.funds_account, ["Payable"])
def validate_item_quantities(self):
for item_entry in self.items:
if item_entry.quantity <= 0:
frappe.throw(f"{item_entry.item} quantity should be more than 0")
def set_item_entry_values(self):
for item_entry in self.items:
if not item_entry.value:
item_entry.value = frappe.get_doc("Item", item_entry.item).value
def set_item_entry_cost(self):
for item_entry in self.items:
item_entry.cost = item_entry.value * item_entry.quantity
def set_invoice_cost(self):
self.cost = sum([item_entry.cost for item_entry in self.items])
def add_items_to_inventory(self):
for item_entry in self.items:
# Update quantity
if frappe.db.exists("Inventory", item_entry.item):
# Update quantity
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity + item_entry.quantity
inventory_doc.save(ignore_permissions=True)
else:
# Create new entry
inventory_doc = frappe.new_doc(doctype="Inventory")
inventory_doc.item = item_entry.item
inventory_doc.company = self.company
inventory_doc.quantity = item_entry.quantity
inventory_doc.insert(ignore_permissions=True)
def remove_items_from_inventory(self):
for item_entry in self.items:
# Update quantity
if frappe.db.exists("Inventory", item_entry.item):
# Update quantity
inventory_doc = frappe.get_doc("Inventory", item_entry.item)
inventory_doc.quantity = inventory_doc.quantity - item_entry.quantity
inventory_doc.save(ignore_permissions=True)
def get_ledger_entry(
self, account, against_account, credit, debit, is_for_cancel=False
):
return frappe.get_doc(
doctype="GL Entry",
posting_date=self.posting_date,
account=account,
against_account=against_account,
credit=credit,
debit=debit,
voucher_type=f"{'Cancel' if is_for_cancel else ''}Purchase Invoice",
company_name=self.company,
voucher_number=self.name,
)
def add_ledger_entries(self):
# Create Ledger Entries
credit_entry = self.get_ledger_entry(
self.funds_account, self.stock_account, credit=self.cost, debit=0.0
)
debit_entry = self.get_ledger_entry(
self.stock_account, self.seller, credit=0.0, debit=self.cost
)
self.insert_ledger_entries(credit_entry, debit_entry)
def cancel_ledger_entries(self):
credit_entry = self.get_ledger_entry(
self.funds_account,
self.stock_account,
credit=0.0,
debit=self.cost,
is_for_cancel=True,
)
debit_entry = self.get_ledger_entry(
self.stock_account,
self.seller,
credit=self.cost,
debit=0.0,
is_for_cancel=True,
)
self.insert_ledger_entries(credit_entry, debit_entry)
def insert_ledger_entries(self, credit_entry, debit_entry):
# Insert Ledger Entries
for gl_entry in [credit_entry, debit_entry]:
gl_entry.docstatus = 1
gl_entry.insert(ignore_permissions=True, ignore_if_duplicate=True)
|
import json
import re
import string
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
TG_CORE_TYPES = ["String", "Boolean", "Integer", "Float"]
API_URL = "https://core.telegram.org/bots/api"
METHODS = "methods"
TYPES = "types"
def retrieve_api_info() -> Dict:
r = requests.get(API_URL)
soup = BeautifulSoup(r.text, features="html.parser")
dev_rules = soup.find("div", {"id": "dev_page_content"})
curr_type = ""
curr_name = ""
items = {
METHODS: dict(),
TYPES: dict(),
}
for x in list(dev_rules.children):
if x.name == "h3":
# New category; clear name and type.
curr_name = ""
curr_type = ""
if x.name == "h4":
anchor = x.find("a")
name = anchor.get("name")
if name and "-" in name:
curr_name = ""
curr_type = ""
continue
curr_name, curr_type = get_type_and_name(x, anchor, items)
if not curr_type or not curr_name:
continue
if x.name == "p":
description = x.get_text().strip()
# we only need returns for methods.
# We only check this while the stored description is still empty, since the first paragraph contains the description.
if curr_type == METHODS and not items[curr_type][curr_name].get("description"):
get_method_return_type(curr_name, curr_type, description, items)
items[curr_type][curr_name].setdefault("description", []).append(description)
if x.name == "table":
get_fields(curr_name, curr_type, x, items)
if x.name == "ul":
get_subtypes(curr_name, curr_type, x, items)
return items
def get_subtypes(curr_name: str, curr_type: str, x, items: dict):
if curr_name == "InputFile": # Has no interesting subtypes
return
subtypes = []
for li in x.find_all("li"):
subtype_name = li.get_text()
subtypes.append(subtype_name)
items[curr_type][curr_name]["subtypes"] = subtypes
items[curr_type][curr_name]["description"] += [f"- {s}" for s in subtypes]
# Get fields/parameters of type/method
def get_fields(curr_name: str, curr_type: str, x, items: dict):
body = x.find("tbody")
fields = []
for tr in body.find_all("tr"):
children = list(tr.find_all("td"))
if curr_type == TYPES and len(children) == 3:
desc = clean_tg_description(children[2].get_text())
fields.append(
{
"name": children[0].get_text(),
"types": clean_tg_type(children[1].get_text()),
"required": not desc.startswith("Optional. "),
"description": desc,
}
)
elif curr_type == METHODS and len(children) == 4:
fields.append(
{
"name": children[0].get_text(),
"types": clean_tg_type(children[1].get_text()),
"required": children[2].get_text() == "Yes",
"description": clean_tg_description(children[3].get_text()),
}
)
else:
print("An unexpected state has occurred!")
print("Type:", curr_type)
print("Name:", curr_name)
print("Number of children:", len(children))
print(children)
exit(1)
items[curr_type][curr_name]["fields"] = fields
def get_method_return_type(curr_name, curr_type, description, items):
ret_search = re.search("(?:on success,|ret.urns)([^.]*)(?:on success)?", description, re.IGNORECASE)
ret_search2 = re.search("([^.]*)(?:is returned)", description, re.IGNORECASE)
if ret_search:
extract_return_type(curr_type, curr_name, ret_search.group(1).strip(), items)
elif ret_search2:
extract_return_type(curr_type, curr_name, ret_search2.group(1).strip(), items)
else:
print("Failed to get return type for", curr_name)
def get_type_and_name(x, anchor, items):
if x.text[0].isupper():
curr_type = TYPES
else:
curr_type = METHODS
curr_name = x.get_text()
items[curr_type][curr_name] = {"name": curr_name}
href = anchor.get("href")
if href:
items[curr_type][curr_name]["href"] = API_URL + href
return curr_name, curr_type
def extract_return_type(curr_type: str, curr_name: str, ret_str: str, items: Dict):
array_match = re.search(r"(?:array of )+(\w*)", ret_str, re.IGNORECASE)
if array_match:
ret = clean_tg_type(array_match.group(1))
rets = [f"Array of {r}" for r in ret]
items[curr_type][curr_name]["returns"] = rets
else:
words = ret_str.split()
rets = [
r for ret in words
for r in clean_tg_type(ret.translate(str.maketrans("", "", string.punctuation)))
if ret[0].isupper()
]
items[curr_type][curr_name]["returns"] = rets
def clean_tg_description(t: str) -> str:
return t.replace('”', '"').replace('“', '"')
def get_proper_type(t: str) -> str:
if t == "Messages": # Avoids https://core.telegram.org/bots/api#sendmediagroup
return "Message"
elif t == "Float number":
return "Float"
elif t == "Int":
return "Integer"
elif t == "True" or t == "Bool":
return "Boolean"
return t
def clean_tg_type(t: str) -> List[str]:
pref = ""
if t.startswith("Array of "):
pref = "Array of "
t = t[len("Array of "):]
fixed_ors = [x.strip() for x in t.split(" or ")] # Fix situations like "A or B"
fixed_ands = [x.strip() for fo in fixed_ors for x in fo.split(" and ")] # Fix situations like "A and B"
fixed_commas = [x.strip() for fa in fixed_ands for x in fa.split(", ")] # Fix situations like "A, B"
return [pref + get_proper_type(x) for x in fixed_commas]
def verify_type_parameters(items: Dict):
for t, values in items[TYPES].items():
# check all values have a URL
if not values.get("href"):
print(f"{t} has no link!")
continue
fields = values.get("fields", [])
if len(fields) == 0:
subtypes = values.get("subtypes", [])
if not subtypes:
print("TYPE", t, "HAS NO FIELDS OR SUBTYPES")
continue
for st in subtypes:
if st in items[TYPES]:
items[TYPES][st].setdefault("subtype_of", []).append(t)
else:
print("TYPE", t, "USES INVALID SUBTYPE", st)
# check all parameter types are valid
for param in fields:
types = param.get("types")
for t in types:
while t.startswith("Array of "):
t = t[len("Array of "):]
if t not in items[TYPES] and t not in TG_CORE_TYPES:
print("UNKNOWN FIELD TYPE", t)
def verify_method_parameters(items: Dict):
# Type check all methods
for method, values in items[METHODS].items():
# check all values have a URL
if not values.get("href"):
print(f"{method} has no link!")
continue
# check all methods have a return
if not values.get("returns"):
print(f"{method} has no return types!")
continue
if len(values.get("returns")) > 1:
print(f"{method} has multiple return types: {values.get("returns")}")
# check all parameter types are valid
for param in values.get("fields", []):
types = param.get("types")
for t in types:
while t.startswith("Array of "):
t = t[len("Array of "):]
if t not in items[TYPES] and t not in TG_CORE_TYPES:
print("UNKNOWN PARAM TYPE", t)
# check all return types are valid
for ret in values.get("returns", []):
while ret.startswith("Array of "):
ret = ret[len("Array of "):]
if ret not in items[TYPES] and ret not in TG_CORE_TYPES:
print("UNKNOWN RETURN TYPE", ret)
if __name__ == '__main__':
ITEMS = retrieve_api_info()
verify_type_parameters(ITEMS)
verify_method_parameters(ITEMS)
with open("api.json", "w") as f:
json.dump(ITEMS, f, indent=2)
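# --- Hedged sanity checks for clean_tg_type, assuming the functions above are
# importable: compound type strings are split and aliases are normalized.
assert clean_tg_type("Array of Integer or String") == ["Array of Integer", "Array of String"]
assert clean_tg_type("Float number") == ["Float"]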
| import json
import re
import string
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
TG_CORE_TYPES = ["String", "Boolean", "Integer", "Float"]
API_URL = "https://core.telegram.org/bots/api"
METHODS = "methods"
TYPES = "types"
def retrieve_api_info() -> Dict:
r = requests.get(API_URL)
soup = BeautifulSoup(r.text, features="html.parser")
dev_rules = soup.find("div", {"id": "dev_page_content"})
curr_type = ""
curr_name = ""
items = {
METHODS: dict(),
TYPES: dict(),
}
for x in list(dev_rules.children):
if x.name == "h3":
# New category; clear name and type.
curr_name = ""
curr_type = ""
if x.name == "h4":
anchor = x.find("a")
name = anchor.get("name")
if name and "-" in name:
curr_name = ""
curr_type = ""
continue
curr_name, curr_type = get_type_and_name(x, anchor, items)
if not curr_type or not curr_name:
continue
if x.name == "p":
description = x.get_text().strip()
# we only need returns for methods.
# We only check this while the stored description is still empty, since the first paragraph contains the description.
if curr_type == METHODS and not items[curr_type][curr_name].get("description"):
get_method_return_type(curr_name, curr_type, description, items)
items[curr_type][curr_name].setdefault("description", []).append(description)
if x.name == "table":
get_fields(curr_name, curr_type, x, items)
if x.name == "ul":
get_subtypes(curr_name, curr_type, x, items)
return items
def get_subtypes(curr_name: str, curr_type: str, x, items: dict):
if curr_name == "InputFile": # Has no interesting subtypes
return
subtypes = []
for li in x.find_all("li"):
subtype_name = li.get_text()
subtypes.append(subtype_name)
items[curr_type][curr_name]["subtypes"] = subtypes
items[curr_type][curr_name]["description"] += [f"- {s}" for s in subtypes]
# Get fields/parameters of type/method
def get_fields(curr_name: str, curr_type: str, x, items: dict):
body = x.find("tbody")
fields = []
for tr in body.find_all("tr"):
children = list(tr.find_all("td"))
if curr_type == TYPES and len(children) == 3:
desc = clean_tg_description(children[2].get_text())
fields.append(
{
"name": children[0].get_text(),
"types": clean_tg_type(children[1].get_text()),
"required": not desc.startswith("Optional. "),
"description": desc,
}
)
elif curr_type == METHODS and len(children) == 4:
fields.append(
{
"name": children[0].get_text(),
"types": clean_tg_type(children[1].get_text()),
"required": children[2].get_text() == "Yes",
"description": clean_tg_description(children[3].get_text()),
}
)
else:
print("An unexpected state has occurred!")
print("Type:", curr_type)
print("Name:", curr_name)
print("Number of children:", len(children))
print(children)
exit(1)
items[curr_type][curr_name]["fields"] = fields
def get_method_return_type(curr_name, curr_type, description, items):
ret_search = re.search("(?:on success,|ret.urns)([^.]*)(?:on success)?", description, re.IGNORECASE)
ret_search2 = re.search("([^.]*)(?:is returned)", description, re.IGNORECASE)
if ret_search:
extract_return_type(curr_type, curr_name, ret_search.group(1).strip(), items)
elif ret_search2:
extract_return_type(curr_type, curr_name, ret_search2.group(1).strip(), items)
else:
print("Failed to get return type for", curr_name)
def get_type_and_name(x, anchor, items):
if x.text[0].isupper():
curr_type = TYPES
else:
curr_type = METHODS
curr_name = x.get_text()
items[curr_type][curr_name] = {"name": curr_name}
href = anchor.get("href")
if href:
items[curr_type][curr_name]["href"] = API_URL + href
return curr_name, curr_type
def extract_return_type(curr_type: str, curr_name: str, ret_str: str, items: Dict):
array_match = re.search(r"(?:array of )+(\w*)", ret_str, re.IGNORECASE)
if array_match:
ret = clean_tg_type(array_match.group(1))
rets = [f"Array of {r}" for r in ret]
items[curr_type][curr_name]["returns"] = rets
else:
words = ret_str.split()
rets = [
r for ret in words
for r in clean_tg_type(ret.translate(str.maketrans("", "", string.punctuation)))
if ret[0].isupper()
]
items[curr_type][curr_name]["returns"] = rets
def clean_tg_description(t: str) -> str:
return t.replace('”', '"').replace('“', '"')
def get_proper_type(t: str) -> str:
if t == "Messages": # Avoids https://core.telegram.org/bots/api#sendmediagroup
return "Message"
elif t == "Float number":
return "Float"
elif t == "Int":
return "Integer"
elif t == "True" or t == "Bool":
return "Boolean"
return t
def clean_tg_type(t: str) -> List[str]:
pref = ""
if t.startswith("Array of "):
pref = "Array of "
t = t[len("Array of "):]
fixed_ors = [x.strip() for x in t.split(" or ")] # Fix situations like "A or B"
fixed_ands = [x.strip() for fo in fixed_ors for x in fo.split(" and ")] # Fix situations like "A and B"
fixed_commas = [x.strip() for fa in fixed_ands for x in fa.split(", ")] # Fix situations like "A, B"
return [pref + get_proper_type(x) for x in fixed_commas]
def verify_type_parameters(items: Dict):
for t, values in items[TYPES].items():
# check all values have a URL
if not values.get("href"):
print(f"{t} has no link!")
continue
fields = values.get("fields", [])
if len(fields) == 0:
subtypes = values.get("subtypes", [])
if not subtypes:
print("TYPE", t, "HAS NO FIELDS OR SUBTYPES")
continue
for st in subtypes:
if st in items[TYPES]:
items[TYPES][st].setdefault("subtype_of", []).append(t)
else:
print("TYPE", t, "USES INVALID SUBTYPE", st)
# check all parameter types are valid
for param in fields:
types = param.get("types")
for t in types:
while t.startswith("Array of "):
t = t[len("Array of "):]
if t not in items[TYPES] and t not in TG_CORE_TYPES:
print("UNKNOWN FIELD TYPE", t)
def verify_method_parameters(items: Dict):
# Type check all methods
for method, values in items[METHODS].items():
# check all values have a URL
if not values.get("href"):
print(f"{method} has no link!")
continue
# check all methods have a return
if not values.get("returns"):
print(f"{method} has no return types!")
continue
if len(values.get("returns")) > 1:
print(f"{method} has multiple return types: {values.get('returns')}")
# check all parameter types are valid
for param in values.get("fields", []):
types = param.get("types")
for t in types:
while t.startswith("Array of "):
t = t[len("Array of "):]
if t not in items[TYPES] and t not in TG_CORE_TYPES:
print("UNKNOWN PARAM TYPE", t)
# check all return types are valid
for ret in values.get("returns", []):
while ret.startswith("Array of "):
ret = ret[len("Array of "):]
if ret not in items[TYPES] and ret not in TG_CORE_TYPES:
print("UNKNOWN RETURN TYPE", ret)
if __name__ == '__main__':
ITEMS = retrieve_api_info()
verify_type_parameters(ITEMS)
verify_method_parameters(ITEMS)
with open("api.json", "w") as f:
json.dump(ITEMS, f, indent=2)
|
import pandas as pd
import numpy as np
def write_lookup_function(name, x_key, y_key):
code = f"""
function {name}
input Real x;
output Real y;
algorithm
for i in 1:size({x_key}, 1) loop
if {x_key}[i+1] > x then
y := {y_key}[i] + (x - {x_key}[i]) * ({y_key}[i] - {y_key}[i + 1]) / ({x_key}[i] - {x_key}[i + 1]);
break;
end if;
end for;
end {name};
"""
return code
def write_direct_lookup_function(name, x_key, y_key):
code = f"""
function {name}
input Real x;
output Real y;
protected
Integer idx;
Real delta_x;
algorithm
idx := integer(x/dx);
delta_x := x - idx*dx;
y := {y_key}[idx+1] + delta_x*({y_key}[idx+2]-{y_key}[idx+1])/dx;
end {name};
"""
return code
def write_output_connector():
code = """
connector Output = output Real annotation(Icon(graphics = {Polygon(lineColor = {52, 101, 164},
fillColor = {144, 222, 236},
fillPattern = FillPattern.Solid,
lineThickness = 1,
points = {{-100, 100},
{100, 0},
{-100, -100},
{-100, 100}})}));
"""
return code
def write_source_base():
code = """
model source_base
Output y annotation(Placement(visible = true,
transformation(origin = {90, 0},
extent = {{-10, -10}, {10, 10}}),
iconTransformation(origin = {90, 0},
extent = {{-10, -10}, {10, 10}})));
equation
annotation(Icon(graphics = {Rectangle(fillColor = {238, 238, 236},
fillPattern = FillPattern.Solid,
lineThickness = 1,
extent = {{-80, 80}, {80, -80}}),
Text(origin = {-2, -1},
extent = {{-74, 13}, {74, -13}},
textString = "%name")}));
end source_base;
"""
return code
def write_source(name, function):
code = f"""
model {name}
extends source_base;
equation
y=Functions.{name}(time);
end {name};
"""
return code
def convert_to_modelica_package(path, dx=None, make_callable=True, output=None):
code = "package data\n"
code += f"constant Real dx = {dx};\n"
# insert data
df = pd.read_csv(path)
keys = df.keys()
x_key = keys[0]
y_keys = keys[1:]
x_min = df[x_key].iloc[0]
x_max = df[x_key].iloc[-1]
x_vals = np.arange(x_min, x_max, dx)
if x_vals[-1] != x_max:
x_vals = np.append(x_vals, [x_max])  # np.append returns a new array
for key in keys:
vals = np.interp(x_vals, df[x_key], df[key]).astype(str)
code += f"constant Real {key}_data[:] = " + "{" + ",".join(vals) + "};\n"
# insert 1D interpolation functions
code += "package Functions\n"
for y_key in y_keys:
code += write_direct_lookup_function(y_key, f"{x_key}_data", f"{y_key}_data")
code += "end Functions;\n"
# insert data sources blocks
code += write_output_connector()
code += write_source_base()
for y_key in y_keys:
code += write_source(y_key, f"Functions.{y_key}")
code += "end data;"
# save modelica file to disk
if output is None:
output = f"{path[:-4]}.mo"
f = open(output, "w")
f.write(code)
f.close()
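# --- Hedged usage sketch: generate a package from a tiny CSV; the file name
# and column names below are hypothetical.
if __name__ == "__main__":
df = pd.DataFrame({"t": [0.0, 1.0, 2.0], "y": [0.0, 2.0, 4.0]})
df.to_csv("signals.csv", index=False)
convert_to_modelica_package("signals.csv", dx=0.5)  # writes signals.mo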
| import pandas as pd
import numpy as np
def write_lookup_function(name, x_key, y_key):
code = f"""
function {name}
input Real x;
output Real y;
algorithm
for i in 1:size({x_key}, 1) loop
if {x_key}[i+1] > x then
y := {y_key}[i] + (x - {x_key}[i]) * ({y_key}[i] - {y_key}[i + 1]) / ({x_key}[i] - {x_key}[i + 1]);
break;
end if;
end for;
end {name};
"""
return code
def write_direct_lookup_function(name, x_key, y_key):
code = f"""
function {name}
input Real x;
output Real y;
protected
Integer idx;
Real delta_x;
algorithm
idx := integer(x/dx);
delta_x := x - idx*dx;
y := {y_key}[idx+1] + delta_x*({y_key}[idx+2]-{y_key}[idx+1])/dx;
end {name};
"""
return code
def write_output_connector():
code = """
connector Output = output Real annotation(Icon(graphics = {Polygon(lineColor = {52, 101, 164},
fillColor = {144, 222, 236},
fillPattern = FillPattern.Solid,
lineThickness = 1,
points = {{-100, 100},
{100, 0},
{-100, -100},
{-100, 100}})}));
"""
return code
def write_source_base():
code = """
model source_base
Output y annotation(Placement(visible = true,
transformation(origin = {90, 0},
extent = {{-10, -10}, {10, 10}}),
iconTransformation(origin = {90, 0},
extent = {{-10, -10}, {10, 10}})));
equation
annotation(Icon(graphics = {Rectangle(fillColor = {238, 238, 236},
fillPattern = FillPattern.Solid,
lineThickness = 1,
extent = {{-80, 80}, {80, -80}}),
Text(origin = {-2, -1},
extent = {{-74, 13}, {74, -13}},
textString = "%name")}));
end source_base;
"""
return code
def write_source(name, function):
code = f"""
model {name}
extends source_base;
equation
y=Functions.{name}(time);
end {name};
"""
return code
def convert_to_modelica_package(path, dx=None, make_callable=True, output=None):
code = "package data\n"
code += f"constant Real dx = {dx};\n"
# insert data
df = pd.read_csv(path)
keys = df.keys()
x_key = keys[0]
y_keys = keys[1:]
x_min = df[x_key].iloc[0]
x_max = df[x_key].iloc[-1]
x_vals = np.arange(x_min, x_max, dx)
if x_vals[-1] != x_max:
x_vals = np.append(x_vals, [x_max])  # np.append returns a new array
for key in keys:
vals = np.interp(x_vals, df[x_key], df[key]).astype(str)
code += f"constant Real {key}_data[:] = " + "{" + ",".join(vals) + "};\n"
# insert 1D interpolation functions
code += "package Functions\n"
for y_key in y_keys:
code += write_direct_lookup_function(y_key, f"{x_key}_data", f"{y_key}_data")
code += "end Functions;\n"
# insert data sources blocks
code += write_output_connector()
code += write_source_base()
for y_key in y_keys:
code += write_source(y_key, f"Functions.{y_key}")
code += "end data;"
# save modelica file to disk
if output is None:
output = f"{path[:-4]}.mo"
f = open(output, "w")
f.write(code)
f.close()
|
import argparse
import logging
import sys
from .fetch import download_fns
logger = logging.getLogger("mne")
AVAILABLE_DATASETS = set(download_fns.keys())
def download_dataset(output_dir, n_first=None, cohort="eegbci"):
download_fns[cohort](output_dir, n_first)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output_dir", type=str, required=True, help="Path to output directory.\nWill be created if not available.",
)
parser.add_argument(
"-c",
"--cohort",
type=str,
help="Choice of EEG dataset (default 'eegbci').",
default="eegbci",
choices=AVAILABLE_DATASETS,
)
parser.add_argument("-n", "--n_first", default=109, type=int, help="Number of recordings to download.")
parser.add_argument("--log", action="store_true")
args = parser.parse_args()
if args.log:
file_handler = logging.FileHandler("logs/fetch_data.log", mode="w")
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.info(f'Usage: {" ".join(sys.argv)}\n')
logger.info("Settings:")
logger.info("---------------------------")
for idx, (k, v) in enumerate(sorted(vars(args).items())):
if idx == (len(vars(args)) - 1):
logger.info(f"{k:>15}\t{v}\n")
else:
logger.info(f"{k:>15}\t{v}")
download_dataset(args.output_dir, args.n_first, args.cohort)
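# --- Hedged programmatic usage, equivalent to the CLI above; the output
# directory is hypothetical and the call is skipped unless it already exists.
import os
if os.path.isdir("/tmp/eegbci_data"):
download_dataset("/tmp/eegbci_data", n_first=5, cohort="eegbci")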
| import argparse
import logging
import sys
from .fetch import download_fns
logger = logging.getLogger("mne")
AVAILABLE_DATASETS = set(download_fns.keys())
def download_dataset(output_dir, n_first=None, cohort="eegbci"):
download_fns[cohort](output_dir, n_first)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output_dir", type=str, required=True, help="Path to output directory.\nWill be created if not available.",
)
parser.add_argument(
"-c",
"--cohort",
type=str,
help="Choice of EEG dataset (default 'eegbci').",
default="eegbci",
choices=AVAILABLE_DATASETS,
)
parser.add_argument("-n", "--n_first", default=109, type=int, help="Number of recordings to download.")
parser.add_argument("--log", action="store_true")
args = parser.parse_args()
if args.log:
file_handler = logging.FileHandler("logs/fetch_data.log", mode="w")
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.info(f'Usage: {" ".join([x for x in sys.argv])}\n')
logger.info("Settings:")
logger.info("---------------------------")
for idx, (k, v) in enumerate(sorted(vars(args).items())):
if idx == (len(vars(args)) - 1):
logger.info(f"{k:>15}\t{v}\n")
else:
logger.info(f"{k:>15}\t{v}")
download_dataset(args.output_dir, args.n_first, args.cohort)
|
import re
from collections import defaultdict
from datetime import date, timedelta
from operator import attrgetter
from cachetools import cachedmethod, keys
from ._provider import Provider
class CurrencyLayer(Provider):
"""
Real-time service with free plan for 250 requests per month.
Implicit base currency is USD.
"""
BASE_URL = "http://www.apilayer.net/api/live?access_key=%s"
name = "currency_layer"
def __init__(self, base_currency, http_user_agent, access_key, logger):
"""
:type base_currency: str
:type http_user_agent: str
:type access_key: str
:type logger: gold_digger.utils.ContextLogger
"""
super().__init__(base_currency, http_user_agent)
if access_key:
self._url = self.BASE_URL % access_key
else:
logger.critical("%s - You need an access token!", self)
self._url = self.BASE_URL % ""
self.has_request_limit = True
@cachedmethod(cache=attrgetter("_cache"), key=lambda date_of_exchange, _: keys.hashkey(date_of_exchange))
def get_supported_currencies(self, date_of_exchange, logger):
"""
:type date_of_exchange: datetime.date
:type logger: gold_digger.utils.ContextLogger
:rtype: set[str]
"""
currencies = set()
response = self._get("https://currencylayer.com/downloads/cl-currencies-table.txt", logger=logger)
if response:
currencies = set(re.findall("<td>([A-Z]{3})</td>", response.text))
if currencies:
logger.debug("%s - Supported currencies: %s", self, currencies)
else:
logger.error("%s - Supported currencies not found.", self)
return currencies
@Provider.check_request_limit(return_value=None)
def get_by_date(self, date_of_exchange, currency, logger):
"""
:type date_of_exchange: datetime.date
:type currency: str
:type logger: gold_digger.utils.ContextLogger
:rtype: decimal.Decimal | None
"""
date_str = date_of_exchange.strftime("%Y-%m-%d")
logger.debug("%s - Requesting for %s (%s)", self, currency, date_str, extra={"currency": currency, "date": date_str})
response = self._get(f"{self._url}&date={date_str}¤cies={currency}", logger=logger)
if not response:
logger.warning("%s - Unexpected response. Response: %s", self, response, extra={"currency": currency, "date": date_str})
return None
response = response.json()
if response["success"]:
records = response.get("quotes", {})
elif response["error"]["code"] == 104:
self.set_request_limit_reached(logger)
return None
else:
logger.warning(
"%s - Unsuccessful request. Error: %s",
self,
response.get("error", {}).get("info"),
extra={"currency": currency, "date": date_str},
)
return None
value = records.get("%s%s" % (self.base_currency, currency))
return self._to_decimal(value, currency, logger=logger) if value is not None else None
@Provider.check_request_limit(return_value={})
def get_all_by_date(self, date_of_exchange, currencies, logger):
"""
:type date_of_exchange: datetime.date
:type currencies: set[str]
:type logger: gold_digger.utils.ContextLogger
:rtype: dict[str, decimal.Decimal | None]
"""
logger.debug("%s - Requesting for all rates for date %s", self, date_of_exchange)
response = self._get(f"{self._url}&date={date_of_exchange.strftime("%Y-%m-%d")}¤cies={",".join(currencies)}", logger=logger)
if not response:
return {}
response = response.json()
records = {}
if response["success"]:
records = response.get("quotes", {})
elif response["error"]["code"] == 104:
self.set_request_limit_reached(logger)
return {}
day_rates = {}
for currency_pair, value in records.items():
currency = currency_pair[3:]
decimal_value = self._to_decimal(value, currency, logger=logger) if value is not None else None
if currency and decimal_value:
day_rates[currency] = decimal_value
return day_rates
@Provider.check_request_limit(return_value={})
def get_historical(self, origin_date, currencies, logger):
"""
:type origin_date: datetime.date
:type currencies: set[str]
:type logger: gold_digger.utils.ContextLogger
:rtype: dict[date, dict[str, decimal.Decimal]]
"""
day_rates = defaultdict(dict)
date_of_exchange = origin_date
date_of_today = date.today()
while date_of_exchange != date_of_today:
response = self._get(f"{self._url}&date={date_of_exchange.strftime("%Y-%m-%d")}¤cies={",".join(currencies)}", logger=logger)
records = {}
if response:
response = response.json()
if response["success"]:
records = response.get("quotes", {})
elif response["error"]["code"] == 104:
self.set_request_limit_reached(logger)
break
for currency_pair, value in records.items():
currency = currency_pair[3:]
decimal_value = self._to_decimal(value, currency, logger=logger) if value is not None else None
if currency and decimal_value:
day_rates[date_of_exchange][currency] = decimal_value
date_of_exchange = date_of_exchange + timedelta(1)
return day_rates
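# --- Hedged sketch of the quote parsing used above: CurrencyLayer keys pairs
# as base+target (e.g. "USDEUR"), so the target currency is the key minus the
# three-letter base. The payload below is hypothetical.
quotes = {"USDEUR": 0.91, "USDGBP": 0.78}
day_rates = {pair[3:]: rate for pair, rate in quotes.items()}
assert day_rates == {"EUR": 0.91, "GBP": 0.78}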
| import re
from collections import defaultdict
from datetime import date, timedelta
from operator import attrgetter
from cachetools import cachedmethod, keys
from ._provider import Provider
class CurrencyLayer(Provider):
"""
Real-time service with free plan for 250 requests per month.
Implicit base currency is USD.
"""
BASE_URL = "http://www.apilayer.net/api/live?access_key=%s"
name = "currency_layer"
def __init__(self, base_currency, http_user_agent, access_key, logger):
"""
:type base_currency: str
:type http_user_agent: str
:type access_key: str
:type logger: gold_digger.utils.ContextLogger
"""
super().__init__(base_currency, http_user_agent)
if access_key:
self._url = self.BASE_URL % access_key
else:
logger.critical("%s - You need an access token!", self)
self._url = self.BASE_URL % ""
self.has_request_limit = True
@cachedmethod(cache=attrgetter("_cache"), key=lambda date_of_exchange, _: keys.hashkey(date_of_exchange))
def get_supported_currencies(self, date_of_exchange, logger):
"""
:type date_of_exchange: datetime.date
:type logger: gold_digger.utils.ContextLogger
:rtype: set[str]
"""
currencies = set()
response = self._get("https://currencylayer.com/downloads/cl-currencies-table.txt", logger=logger)
if response:
currencies = set(re.findall("<td>([A-Z]{3})</td>", response.text))
if currencies:
logger.debug("%s - Supported currencies: %s", self, currencies)
else:
logger.error("%s - Supported currencies not found.", self)
return currencies
@Provider.check_request_limit(return_value=None)
def get_by_date(self, date_of_exchange, currency, logger):
"""
:type date_of_exchange: datetime.date
:type currency: str
:type logger: gold_digger.utils.ContextLogger
:rtype: decimal.Decimal | None
"""
date_str = date_of_exchange.strftime("%Y-%m-%d")
logger.debug("%s - Requesting for %s (%s)", self, currency, date_str, extra={"currency": currency, "date": date_str})
response = self._get(f"{self._url}&date={date_str}¤cies={currency}", logger=logger)
if not response:
logger.warning("%s - Unexpected response. Response: %s", self, response, extra={"currency": currency, "date": date_str})
return None
response = response.json()
if response["success"]:
records = response.get("quotes", {})
elif response["error"]["code"] == 104:
self.set_request_limit_reached(logger)
return None
else:
logger.warning(
"%s - Unsuccessful request. Error: %s",
self,
response.get("error", {}).get("info"),
extra={"currency": currency, "date": date_str},
)
return None
value = records.get("%s%s" % (self.base_currency, currency))
return self._to_decimal(value, currency, logger=logger) if value is not None else None
@Provider.check_request_limit(return_value={})
def get_all_by_date(self, date_of_exchange, currencies, logger):
"""
:type date_of_exchange: datetime.date
:type currencies: set[str]
:type logger: gold_digger.utils.ContextLogger
:rtype: dict[str, decimal.Decimal | None]
"""
logger.debug("%s - Requesting for all rates for date %s", self, date_of_exchange)
response = self._get(f"{self._url}&date={date_of_exchange.strftime('%Y-%m-%d')}¤cies={','.join(currencies)}", logger=logger)
if not response:
return {}
response = response.json()
records = {}
if response["success"]:
records = response.get("quotes", {})
elif response["error"]["code"] == 104:
self.set_request_limit_reached(logger)
return {}
day_rates = {}
for currency_pair, value in records.items():
currency = currency_pair[3:]
decimal_value = self._to_decimal(value, currency, logger=logger) if value is not None else None
if currency and decimal_value:
day_rates[currency] = decimal_value
return day_rates
@Provider.check_request_limit(return_value={})
def get_historical(self, origin_date, currencies, logger):
"""
:type origin_date: datetime.date
:type currencies: set[str]
:type logger: gold_digger.utils.ContextLogger
:rtype: dict[date, dict[str, decimal.Decimal]]
"""
day_rates = defaultdict(dict)
date_of_exchange = origin_date
date_of_today = date.today()
while date_of_exchange != date_of_today:
response = self._get(f"{self._url}&date={date_of_exchange.strftime('%Y-%m-%d')}¤cies={','.join(currencies)}", logger=logger)
records = {}
if response:
response = response.json()
if response["success"]:
records = response.get("quotes", {})
elif response["error"]["code"] == 104:
self.set_request_limit_reached(logger)
break
for currency_pair, value in records.items():
currency = currency_pair[3:]
decimal_value = self._to_decimal(value, currency, logger=logger) if value is not None else None
if currency and decimal_value:
day_rates[date_of_exchange][currency] = decimal_value
date_of_exchange = date_of_exchange + timedelta(1)
return day_rates
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from os import cpu_count
from pathlib import Path
from threading import Lock
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
logger = logging.getLogger(__name__)
_executor_lock = Lock()
_executor = None
class Executor(BaseModel):
"""Defines an execution environment for jobs.
E.g. a node on a cluster, the local machine, etc. To create jobs,
instantiate this class and submit functions using the executor API:
>>> executor = Executor(type="local", block=True)
>>> with executor.get_executor(Path("logs")) as executor:
... executor.submit(my_job, arg1, arg2)
... executor.submit(another_job)
"""
class Type(str, Enum):
"""Types of execution environments."""
SLURM = "slurm"
"""Submit jobs to a SLURM cluster scheduler."""
LOCAL = "local"
"""Submit jobs to run on the current machine."""
DEBUG = "debug"
"""Submit jobs to run synchronously on the current machine."""
NOOP = "noop"
"""Submitted jobs return immediately without executing. This can be
useful for debugging, where you want to validate the code and
configuration without performing any computation.
"""
type: Type = Field(allow_mutation=False)
"""The execution environment."""
slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
"""The name of the SLURM partition to submit jobs to.
Only used for :code:`Type.SLURM` executors.
"""
cpus: int = Field(default=1, allow_mutation=False, ge=-1)
"""The number of CPU threads to provision.
If the type of executor is :code:`Type.SLURM`, this is the number of CPU
threads to provision for each job. If the type of executor is
:code:`Type.LOCAL`, this is the number of parallel jobs to process in a
thread pool. If the value is -1 and the executor is :code:`Type.LOCAL`, the
number of physical cores on the machine is used. Has no effect for
:code:`Type.DEBUG` and :code:`Type.NOOP`.
"""
gpus: int = Field(default=0, allow_mutation=False, ge=0)
"""The number of GPUs to provision.
This is used only by the :code:`Type.SLURM` executor.
"""
timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
block: bool = Field(default=False, allow_mutation=False)
"""If :code:`True`, the :code:`get_executor()` context manager will block
until all jobs have completed when exiting scope. Jobs are still submitted
asynchronously for parallel execution.
"""
# === Start of public API. ===
@contextmanager
def get_executor(
self, logs_dir: Path, timeout_hours: Optional[float] = None, cpus=None
) -> "Executor":
cpus = cpus or self.cpus
timeout_hours = timeout_hours or self.timeout_hours
if self.type == self.Type.SLURM:
try:
from submitit import AutoExecutor
except ImportError as e:
raise OSError(
"Using the slurm executor requires the submitit library. "
"Install submitit using: python -m pip install submitit"
) from e
executor = AutoExecutor(folder=logs_dir)
executor.update_parameters(
timeout_min=int(round(timeout_hours * 60)),
nodes=1,
cpus_per_task=cpus,
gpus_per_node=self.gpus,
slurm_partition=self.slurm_partition,
)
name = self.slurm_partition or "slurm" # default value for logging
elif self.type == self.Type.LOCAL:
executor, name = (
LocalParallelExecutor(
cpus=cpus,
timeout_seconds=int(round(timeout_hours * 3600)),
),
"local",
)
elif self.type == self.Type.DEBUG:
executor, name = LocalSynchronousExecutor(), "local"
elif self.type == self.Type.NOOP:
executor, name = DummyExecutor(), "noop"
else:
assert False, f"Unknown executor: {self.type} ({type(self.type).__name__})"
executor = WrappedExecutor(executor, name=name)
yield executor
if self.type == self.Type.DEBUG or self.block:
wait_on_jobs(
executor.jobs,
executor_name=str(executor),
cancel_on_error=self.type == self.Type.SLURM,
)
if hasattr(executor.unwrapped, "close"):
executor.unwrapped.close()
@staticmethod
def get_default_local_executor():
"""Return a singleton :code:`Executor`.
:returns: An executor.
"""
with _executor_lock:
global _executor
if _executor is None:
_executor = Executor(type="local", cpus=cpu_count())
return _executor
# === Start of implementation details. ===
@validator("slurm_partition")
def validate_slurm_partition(cls, value, *, values, **kwargs):
del kwargs
if values["type"] == cls.Type.SLURM:
assert value, f"Must specify a partition for executor: {values["executor"]}"
return value
@validator("cpus", pre=True)
def validate_cpus(cls, value, *, values, **kwargs):
del kwargs
# A cpus value of -1 defaults to the machine's CPU count.
if values["type"] == cls.Type.LOCAL and value == -1:
return cpu_count()
return value
@root_validator
def local_always_blocks(cls, values):
if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
values["block"] = True
return values
class Config:
validate_assignment = True
class WrappedExecutor:
"""An executor-like interface that records all jobs that are submitted."""
def __init__(self, executor, name: str):
self.unwrapped = executor
self.jobs = []
self.name = name
def submit(self, *args, **kwargs):
job = self.unwrapped.submit(*args, **kwargs)
logger.info("Submitting job %s to %s ...", job.job_id, self)
self.jobs.append(job)
return job
def __repr__(self) -> str:
return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
njobs = len(jobs)
jobs = deque(jobs)
def cancel_all_jobs(jobs):
print(f"Cancelling {len(jobs)} {executor_name} jobs")
for job in jobs:
try:
job.cancel()
except: # noqa
pass
# Produce a list of the first few job IDs
max_num_job_ids_to_show = 8
job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
job_ids = ", ".join(str(x) for x in job_ids)
job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
if len(jobs) > max_num_job_ids_to_show:
job_ids = f"{job_ids} ..."
logger.info(
f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
)
completed = 0
while jobs:
job = jobs.popleft()
if cancel_on_error:
try:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
except Exception as e: # noqa Intentionally broad.
logger.error(f"Caught: {type(e).__name__}: {e}")
jobs.append(job)
return cancel_all_jobs(jobs)
else:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
logger.info("All done.")
class LocalParallelExecutor:
"""An executor which uses a process pool to process jobs in parallel on the
local machine.
"""
class LocalJob:
def __init__(self, job_id: int, async_result, timeout_seconds: int):
self._async_result = async_result
self.job_id = job_id
self.timeout_seconds = timeout_seconds
def result(self):
return self._async_result.get(timeout=self.timeout_seconds)
def cancel(self):
pass
def __init__(self, cpus: int, timeout_seconds: int):
self.last_job_id = 0
self.process_pool = multiprocessing.Pool(cpus)
self.timeout_seconds = timeout_seconds
self.futures = []
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
self.futures.append(self.process_pool.apply_async(fn, args, kwargs))
return self.LocalJob(
self.last_job_id,
self.futures[-1],
self.timeout_seconds,
)
def close(self):
# Block until all jobs have completed.
for future in self.futures:
future.get()
self.process_pool.close()
class LocalSynchronousExecutor:
"""An executor where each job is executed synchronously when result() is
called."""
class LocalJob:
def __init__(self, job_id: int, fn, *args, **kwargs):
self._callback = lambda: fn(*args, **kwargs)
self.job_id = job_id
def result(self):
return self._callback()
def cancel(self):
pass
def __init__(self):
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
class DummyJob:
def __init__(self, job_id: int):
self.job_id = job_id
def result(self):
return None
def cancel(self):
pass
def __init__(self) -> None:
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
del fn
del args
del kwargs
self.last_job_id += 1
return self.DummyJob(self.last_job_id)
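# --- Hedged usage sketch, assuming the classes above are importable; my_job is
# hypothetical. The debug executor runs jobs synchronously, so the result is
# available as soon as the context exits.
def my_job(x):
return x * 2
if __name__ == "__main__":
ex = Executor(type="debug")
with ex.get_executor(Path("logs")) as pool:
job = pool.submit(my_job, 21)
assert job.result() == 42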
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from os import cpu_count
from pathlib import Path
from threading import Lock
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
logger = logging.getLogger(__name__)
_executor_lock = Lock()
_executor = None
class Executor(BaseModel):
"""Defines an execution environment for jobs.
E.g. a node on a cluster, the local machine, etc. To create jobs,
instantiate this class and submit functions using the executor API:
>>> executor = Executor(type="local", block=True)
>>> with executor.get_executor(Path("logs")) as executor:
... executor.submit(my_job, arg1, arg2)
... executor.submit(another_job)
"""
class Type(str, Enum):
"""Types of execution environments."""
SLURM = "slurm"
"""Submit jobs to a SLURM cluster scheduler."""
LOCAL = "local"
"""Submit jobs to run on the current machine."""
DEBUG = "debug"
"""Submit jobs to run synchronously on the current machine."""
NOOP = "noop"
"""Submitted jobs return immediately without executing. This can be
useful for debugging, where you want to validate the code and
configuration without performing any computation.
"""
type: Type = Field(allow_mutation=False)
"""The execution environment."""
slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
"""The name of the SLURM partition to submit jobs to.
Only used for :code:`Type.SLURM` executors.
"""
cpus: int = Field(default=1, allow_mutation=False, ge=-1)
"""The number of CPU threads to provision.
If the type of executor is :code:`Type.SLURM`, this is the number of CPU
threads to provision for each job. If the type of executor is
:code:`Type.LOCAL`, this is the number of parallel jobs to process in a
thread pool. If the value is -1 and the executor is :code:`Type.LOCAL`, the
number of physical cores on the machine is used. Has no effect for
:code:`Type.DEBUG` and :code:`Type.NOOP`.
"""
gpus: int = Field(default=0, allow_mutation=False, ge=0)
"""The number of GPUs to provision.
This is used only by the :code:`Type.SLURM` executor.
"""
timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
block: bool = Field(default=False, allow_mutation=False)
"""If :code:`True`, the :code:`get_executor()` context manager will block
until all jobs have completed when exiting scope. Jobs are still submitted
asynchronously for parallel execution.
"""
# === Start of public API. ===
@contextmanager
def get_executor(
self, logs_dir: Path, timeout_hours: Optional[float] = None, cpus=None
) -> "Executor":
cpus = cpus or self.cpus
timeout_hours = timeout_hours or self.timeout_hours
if self.type == self.Type.SLURM:
try:
from submitit import AutoExecutor
except ImportError as e:
raise OSError(
"Using the slurm executor requires the submitit library. "
"Install submitit using: python -m pip install submitit"
) from e
executor = AutoExecutor(folder=logs_dir)
executor.update_parameters(
timeout_min=int(round(timeout_hours * 60)),
nodes=1,
cpus_per_task=cpus,
gpus_per_node=self.gpus,
slurm_partition=self.slurm_partition,
)
name = self.slurm_partition or "slurm" # default value for logging
elif self.type == self.Type.LOCAL:
executor, name = (
LocalParallelExecutor(
cpus=cpus,
timeout_seconds=int(round(timeout_hours * 3600)),
),
"local",
)
elif self.type == self.Type.DEBUG:
executor, name = LocalSynchronousExecutor(), "local"
elif self.type == self.Type.NOOP:
executor, name = DummyExecutor(), "noop"
else:
assert False, f"Unknown executor: {self.type} ({type(self.type).__name__})"
executor = WrappedExecutor(executor, name=name)
yield executor
if self.type == self.Type.DEBUG or self.block:
wait_on_jobs(
executor.jobs,
executor_name=str(executor),
cancel_on_error=self.type == self.Type.SLURM,
)
if hasattr(executor.unwrapped, "close"):
executor.unwrapped.close()
@staticmethod
def get_default_local_executor():
"""Return a singleton :code:`Executor`.
:returns: An executor.
"""
with _executor_lock:
global _executor
if _executor is None:
_executor = Executor(type="local", cpus=cpu_count())
return _executor
# === Start of implementation details. ===
@validator("slurm_partition")
def validate_slurm_partition(cls, value, *, values, **kwargs):
del kwargs
if values["type"] == cls.Type.SLURM:
assert value, f"Must specify a partition for executor: {values['type']}"
return value
@validator("cpus", pre=True)
def validate_cpus(cls, value, *, values, **kwargs):
del kwargs
# -1 CPU count defaults to CPU count.
if values["type"] == cls.Type.LOCAL and value == -1:
return cpu_count()
return value
@root_validator
def local_always_blocks(cls, values):
if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
values["block"] = True
return values
class Config:
validate_assignment = True
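# A minimal configuration sketch (comments only, so the module still imports
# cleanly; ``my_job`` is an assumed, picklable function):
#
#     ex = Executor(type="local", cpus=-1)  # validate_cpus resolves -1 to cpu_count()
#     assert ex.block                       # local_always_blocks forces block=True
#     with ex.get_executor(logs_dir=Path("logs")) as pool:
#         pool.submit(my_job)               # waited on at context exit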
class WrappedExecutor:
"""An executor-like interface that records all jobs that are submitted."""
def __init__(self, executor, name: str):
self.unwrapped = executor
self.jobs = []
self.name = name
def submit(self, *args, **kwargs):
job = self.unwrapped.submit(*args, **kwargs)
logger.info("Submitting job %s to %s ...", job.job_id, self)
self.jobs.append(job)
return job
def __repr__(self) -> str:
return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
njobs = len(jobs)
jobs = deque(jobs)
def cancel_all_jobs(jobs):
print(f"Cancelling {len(jobs)} {executor_name} jobs")
for job in jobs:
try:
job.cancel()
except: # noqa
pass
# Produce a list of the first few job IDs
max_num_job_ids_to_show = 8
job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
job_ids = ", ".join(str(x) for x in job_ids)
job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
if len(jobs) > max_num_job_ids_to_show:
job_ids = f"{job_ids} ..."
logger.info(
f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
)
completed = 0
while jobs:
job = jobs.popleft()
if cancel_on_error:
try:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
except Exception as e: # noqa Intentionally broad.
logger.error(f"Caught: {type(e).__name__}: {e}")
jobs.append(job)
return cancel_all_jobs(jobs)
else:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
logger.info("All done.")
class LocalParallelExecutor:
"""An executor which uses a process pool to process jobs in parallel on the
local machine.
"""
class LocalJob:
def __init__(self, job_id: int, async_result, timeout_seconds: int):
self._async_result = async_result
self.job_id = job_id
self.timeout_seconds = timeout_seconds
def result(self):
return self._async_result.get(timeout=self.timeout_seconds)
def cancel(self):
pass
def __init__(self, cpus: int, timeout_seconds: int):
self.last_job_id = 0
self.process_pool = multiprocessing.Pool(cpus)
self.timeout_seconds = timeout_seconds
self.futures = []
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
self.futures.append(self.process_pool.apply_async(fn, args, kwargs))
return self.LocalJob(
self.last_job_id,
self.futures[-1],
self.timeout_seconds,
)
def close(self):
# Block until all jobs have completed.
for future in self.futures:
future.get()
self.process_pool.close()
self.process_pool.join()
class LocalSynchronousExecutor:
"""An executor where each job is executed synchronously when result() is
called."""
class LocalJob:
def __init__(self, job_id: int, fn, *args, **kwargs):
self._callback = lambda: fn(*args, **kwargs)
self.job_id = job_id
def result(self):
return self._callback()
def cancel(self):
pass
def __init__(self):
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
class DummyJob:
def __init__(self, job_id: int):
self.job_id = job_id
def result(self):
return None
def cancel(self):
pass
def __init__(self) -> None:
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
del fn
del args
del kwargs
self.last_job_id += 1
return self.DummyJob(self.last_job_id)
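if __name__ == "__main__":
    # Demonstration sketch (an addition, not part of the original module):
    # a "noop" dry run that exercises the submission plumbing end to end
    # without performing any computation.
    logging.basicConfig(level=logging.INFO)

    def fake_job(x):
        return x

    executor = Executor(type="noop")
    with executor.get_executor(logs_dir=Path("/tmp/executor-logs")) as pool:
        job = pool.submit(fake_job, 42)
    # DummyExecutor jobs complete immediately and always return None.
    assert job.result() is None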
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DeviceFleetArgs', 'DeviceFleet']
@pulumi.input_type
class DeviceFleetArgs:
def __init__(__self__, *,
device_fleet_name: pulumi.Input[str],
output_config: pulumi.Input['DeviceFleetOutputConfigArgs'],
role_arn: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a DeviceFleet resource.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input['DeviceFleetOutputConfigArgs'] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
pulumi.set(__self__, "device_fleet_name", device_fleet_name)
pulumi.set(__self__, "output_config", output_config)
pulumi.set(__self__, "role_arn", role_arn)
if description is not None:
pulumi.set(__self__, "description", description)
if enable_iot_role_alias is not None:
pulumi.set(__self__, "enable_iot_role_alias", enable_iot_role_alias)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> pulumi.Input[str]:
"""
The name of the Device Fleet (must be unique).
"""
return pulumi.get(self, "device_fleet_name")
@device_fleet_name.setter
def device_fleet_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_fleet_name", value)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> pulumi.Input['DeviceFleetOutputConfigArgs']:
"""
Specifies details about the repository. see Output Config details below.
"""
return pulumi.get(self, "output_config")
@output_config.setter
def output_config(self, value: pulumi.Input['DeviceFleetOutputConfigArgs']):
pulumi.set(self, "output_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the fleet.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="enableIotRoleAlias")
def enable_iot_role_alias(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
"""
return pulumi.get(self, "enable_iot_role_alias")
@enable_iot_role_alias.setter
def enable_iot_role_alias(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_iot_role_alias", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _DeviceFleetState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
iot_role_alias: Optional[pulumi.Input[str]] = None,
output_config: Optional[pulumi.Input['DeviceFleetOutputConfigArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering DeviceFleet resources.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input['DeviceFleetOutputConfigArgs'] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if description is not None:
pulumi.set(__self__, "description", description)
if device_fleet_name is not None:
pulumi.set(__self__, "device_fleet_name", device_fleet_name)
if enable_iot_role_alias is not None:
pulumi.set(__self__, "enable_iot_role_alias", enable_iot_role_alias)
if iot_role_alias is not None:
pulumi.set(__self__, "iot_role_alias", iot_role_alias)
if output_config is not None:
pulumi.set(__self__, "output_config", output_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the fleet.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Device Fleet (must be unique).
"""
return pulumi.get(self, "device_fleet_name")
@device_fleet_name.setter
def device_fleet_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "device_fleet_name", value)
@property
@pulumi.getter(name="enableIotRoleAlias")
def enable_iot_role_alias(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
"""
return pulumi.get(self, "enable_iot_role_alias")
@enable_iot_role_alias.setter
def enable_iot_role_alias(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_iot_role_alias", value)
@property
@pulumi.getter(name="iotRoleAlias")
def iot_role_alias(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "iot_role_alias")
@iot_role_alias.setter
def iot_role_alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iot_role_alias", value)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> Optional[pulumi.Input['DeviceFleetOutputConfigArgs']]:
"""
Specifies details about the repository. see Output Config details below.
"""
return pulumi.get(self, "output_config")
@output_config.setter
def output_config(self, value: Optional[pulumi.Input['DeviceFleetOutputConfigArgs']]):
pulumi.set(self, "output_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class DeviceFleet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides a Sagemaker Device Fleet resource.
## Example Usage
### Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.DeviceFleet("example",
device_fleet_name="example",
role_arn=aws_iam_role["test"]["arn"],
output_config=aws.sagemaker.DeviceFleetOutputConfigArgs(
s3_output_location=f"s3://{aws_s3_bucket['example']['bucket']}/prefix/",
))
```
## Import
Sagemaker Device Fleets can be imported using the `name`, e.g.,
```sh
$ pulumi import aws:sagemaker/deviceFleet:DeviceFleet example my-fleet
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeviceFleetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Sagemaker Device Fleet resource.
## Example Usage
### Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.DeviceFleet("example",
device_fleet_name="example",
role_arn=aws_iam_role["test"]["arn"],
output_config=aws.sagemaker.DeviceFleetOutputConfigArgs(
s3_output_location=f"s3://{aws_s3_bucket['example']['bucket']}/prefix/",
))
```
## Import
Sagemaker Device Fleets can be imported using the `name`, e.g.,
```sh
$ pulumi import aws:sagemaker/deviceFleet:DeviceFleet example my-fleet
```
:param str resource_name: The name of the resource.
:param DeviceFleetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeviceFleetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeviceFleetArgs.__new__(DeviceFleetArgs)
__props__.__dict__["description"] = description
if device_fleet_name is None and not opts.urn:
raise TypeError("Missing required property 'device_fleet_name'")
__props__.__dict__["device_fleet_name"] = device_fleet_name
__props__.__dict__["enable_iot_role_alias"] = enable_iot_role_alias
if output_config is None and not opts.urn:
raise TypeError("Missing required property 'output_config'")
__props__.__dict__["output_config"] = output_config
if role_arn is None and not opts.urn:
raise TypeError("Missing required property 'role_arn'")
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["iot_role_alias"] = None
__props__.__dict__["tags_all"] = None
super(DeviceFleet, __self__).__init__(
'aws:sagemaker/deviceFleet:DeviceFleet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
iot_role_alias: Optional[pulumi.Input[str]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'DeviceFleet':
"""
Get an existing DeviceFleet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DeviceFleetState.__new__(_DeviceFleetState)
__props__.__dict__["arn"] = arn
__props__.__dict__["description"] = description
__props__.__dict__["device_fleet_name"] = device_fleet_name
__props__.__dict__["enable_iot_role_alias"] = enable_iot_role_alias
__props__.__dict__["iot_role_alias"] = iot_role_alias
__props__.__dict__["output_config"] = output_config
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return DeviceFleet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description of the fleet.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> pulumi.Output[str]:
"""
The name of the Device Fleet (must be unique).
"""
return pulumi.get(self, "device_fleet_name")
@property
@pulumi.getter(name="enableIotRoleAlias")
def enable_iot_role_alias(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
"""
return pulumi.get(self, "enable_iot_role_alias")
@property
@pulumi.getter(name="iotRoleAlias")
def iot_role_alias(self) -> pulumi.Output[str]:
return pulumi.get(self, "iot_role_alias")
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> pulumi.Output['outputs.DeviceFleetOutputConfig']:
"""
Specifies details about the repository. see Output Config details below.
"""
return pulumi.get(self, "output_config")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
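# A hypothetical lookup sketch (comments only; assumes a fleet with provider
# ID "my-fleet" already exists; it mirrors the import example above):
#
#     existing = aws.sagemaker.DeviceFleet.get("existing", id="my-fleet")
#     pulumi.export("fleet_arn", existing.arn)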
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DeviceFleetArgs', 'DeviceFleet']
@pulumi.input_type
class DeviceFleetArgs:
def __init__(__self__, *,
device_fleet_name: pulumi.Input[str],
output_config: pulumi.Input['DeviceFleetOutputConfigArgs'],
role_arn: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a DeviceFleet resource.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input['DeviceFleetOutputConfigArgs'] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
pulumi.set(__self__, "device_fleet_name", device_fleet_name)
pulumi.set(__self__, "output_config", output_config)
pulumi.set(__self__, "role_arn", role_arn)
if description is not None:
pulumi.set(__self__, "description", description)
if enable_iot_role_alias is not None:
pulumi.set(__self__, "enable_iot_role_alias", enable_iot_role_alias)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> pulumi.Input[str]:
"""
The name of the Device Fleet (must be unique).
"""
return pulumi.get(self, "device_fleet_name")
@device_fleet_name.setter
def device_fleet_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_fleet_name", value)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> pulumi.Input['DeviceFleetOutputConfigArgs']:
"""
Specifies details about the repository. see Output Config details below.
"""
return pulumi.get(self, "output_config")
@output_config.setter
def output_config(self, value: pulumi.Input['DeviceFleetOutputConfigArgs']):
pulumi.set(self, "output_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the fleet.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="enableIotRoleAlias")
def enable_iot_role_alias(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
"""
return pulumi.get(self, "enable_iot_role_alias")
@enable_iot_role_alias.setter
def enable_iot_role_alias(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_iot_role_alias", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _DeviceFleetState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
iot_role_alias: Optional[pulumi.Input[str]] = None,
output_config: Optional[pulumi.Input['DeviceFleetOutputConfigArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering DeviceFleet resources.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input['DeviceFleetOutputConfigArgs'] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if description is not None:
pulumi.set(__self__, "description", description)
if device_fleet_name is not None:
pulumi.set(__self__, "device_fleet_name", device_fleet_name)
if enable_iot_role_alias is not None:
pulumi.set(__self__, "enable_iot_role_alias", enable_iot_role_alias)
if iot_role_alias is not None:
pulumi.set(__self__, "iot_role_alias", iot_role_alias)
if output_config is not None:
pulumi.set(__self__, "output_config", output_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the fleet.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Device Fleet (must be unique).
"""
return pulumi.get(self, "device_fleet_name")
@device_fleet_name.setter
def device_fleet_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "device_fleet_name", value)
@property
@pulumi.getter(name="enableIotRoleAlias")
def enable_iot_role_alias(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
"""
return pulumi.get(self, "enable_iot_role_alias")
@enable_iot_role_alias.setter
def enable_iot_role_alias(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_iot_role_alias", value)
@property
@pulumi.getter(name="iotRoleAlias")
def iot_role_alias(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "iot_role_alias")
@iot_role_alias.setter
def iot_role_alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iot_role_alias", value)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> Optional[pulumi.Input['DeviceFleetOutputConfigArgs']]:
"""
Specifies details about the repository. see Output Config details below.
"""
return pulumi.get(self, "output_config")
@output_config.setter
def output_config(self, value: Optional[pulumi.Input['DeviceFleetOutputConfigArgs']]):
pulumi.set(self, "output_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class DeviceFleet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides a Sagemaker Device Fleet resource.
## Example Usage
### Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.DeviceFleet("example",
device_fleet_name="example",
role_arn=aws_iam_role["test"]["arn"],
output_config=aws.sagemaker.DeviceFleetOutputConfigArgs(
s3_output_location=f"s3://{aws_s3_bucket['example']['bucket']}/prefix/",
))
```
## Import
Sagemaker Device Fleets can be imported using the `name`, e.g.,
```sh
$ pulumi import aws:sagemaker/deviceFleet:DeviceFleet example my-fleet
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeviceFleetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Sagemaker Device Fleet resource.
## Example Usage
### Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.DeviceFleet("example",
device_fleet_name="example",
role_arn=aws_iam_role["test"]["arn"],
output_config=aws.sagemaker.DeviceFleetOutputConfigArgs(
s3_output_location=f"s3://{aws_s3_bucket['example']['bucket']}/prefix/",
))
```
## Import
Sagemaker Device Fleets can be imported using the `name`, e.g.,
```sh
$ pulumi import aws:sagemaker/deviceFleet:DeviceFleet example my-fleet
```
:param str resource_name: The name of the resource.
:param DeviceFleetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeviceFleetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeviceFleetArgs.__new__(DeviceFleetArgs)
__props__.__dict__["description"] = description
if device_fleet_name is None and not opts.urn:
raise TypeError("Missing required property 'device_fleet_name'")
__props__.__dict__["device_fleet_name"] = device_fleet_name
__props__.__dict__["enable_iot_role_alias"] = enable_iot_role_alias
if output_config is None and not opts.urn:
raise TypeError("Missing required property 'output_config'")
__props__.__dict__["output_config"] = output_config
if role_arn is None and not opts.urn:
raise TypeError("Missing required property 'role_arn'")
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["iot_role_alias"] = None
__props__.__dict__["tags_all"] = None
super(DeviceFleet, __self__).__init__(
'aws:sagemaker/deviceFleet:DeviceFleet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
enable_iot_role_alias: Optional[pulumi.Input[bool]] = None,
iot_role_alias: Optional[pulumi.Input[str]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'DeviceFleet':
"""
Get an existing DeviceFleet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
:param pulumi.Input[str] description: A description of the fleet.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet (must be unique).
:param pulumi.Input[bool] enable_iot_role_alias: Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
:param pulumi.Input[pulumi.InputType['DeviceFleetOutputConfigArgs']] output_config: Specifies details about the repository. see Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DeviceFleetState.__new__(_DeviceFleetState)
__props__.__dict__["arn"] = arn
__props__.__dict__["description"] = description
__props__.__dict__["device_fleet_name"] = device_fleet_name
__props__.__dict__["enable_iot_role_alias"] = enable_iot_role_alias
__props__.__dict__["iot_role_alias"] = iot_role_alias
__props__.__dict__["output_config"] = output_config
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return DeviceFleet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description of the fleet.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> pulumi.Output[str]:
"""
The name of the Device Fleet (must be unique).
"""
return pulumi.get(self, "device_fleet_name")
@property
@pulumi.getter(name="enableIotRoleAlias")
def enable_iot_role_alias(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}".
"""
return pulumi.get(self, "enable_iot_role_alias")
@property
@pulumi.getter(name="iotRoleAlias")
def iot_role_alias(self) -> pulumi.Output[str]:
return pulumi.get(self, "iot_role_alias")
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> pulumi.Output['outputs.DeviceFleetOutputConfig']:
"""
Specifies details about the repository. see Output Config details below.
"""
return pulumi.get(self, "output_config")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import logging
import os
import tempfile
import time
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from uuid import UUID
import jmespath
from azure.applicationinsights import ApplicationInsightsDataClient
from azure.applicationinsights.models import QueryBody
from azure.common.client_factory import get_azure_cli_credentials
from onefuzztypes.enums import ContainerType, TaskType
from onefuzztypes.models import BlobRef, NodeAssignment, Report, Task
from onefuzztypes.primitives import Container, Directory
from onefuzz.api import UUID_EXPANSION, Command, Onefuzz
from .azcopy import azcopy_sync
from .backend import wait
from .rdp import rdp_connect
from .ssh import ssh_connect
EMPTY_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
ZERO_SHA256 = "0" * len(EMPTY_SHA256)
DAY_TIMESPAN = "PT24H"
HOUR_TIMESPAN = "PT1H"
DEFAULT_TAIL_DELAY = 10.0
class DebugRepro(Command):
""" Debug repro instances """
def _disambiguate(self, vm_id: UUID_EXPANSION) -> str:
return str(
self.onefuzz.repro._disambiguate_uuid(
"vm_id",
vm_id,
lambda: [str(x.vm_id) for x in self.onefuzz.repro.list()],
)
)
def _info(self) -> Tuple[str, str]:
info = self.onefuzz.info.get()
return info.resource_group, info.subscription
def ssh(self, vm_id: str) -> None:
vm_id = self._disambiguate(vm_id)
repro = self.onefuzz.repro.get(vm_id)
if repro.ip is None:
raise Exception("missing IP: %s" % repro)
if repro.auth is None:
raise Exception("missing Auth: %s" % repro)
with ssh_connect(repro.ip, repro.auth.private_key, call=True):
pass
def rdp(self, vm_id: str) -> None:
vm_id = self._disambiguate(vm_id)
repro = self.onefuzz.repro.get(vm_id)
if repro.ip is None:
raise Exception("missing IP: %s" % repro)
if repro.auth is None:
raise Exception("missing Auth: %s" % repro)
RDP_PORT = 3389
with rdp_connect(repro.ip, repro.auth.password, port=RDP_PORT):
return
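# Typical invocations for this command group (a sketch; the onefuzz CLI maps
# Command subclasses to subcommands, and vm_id accepts any unambiguous UUID
# prefix thanks to _disambiguate):
#
#     onefuzz debug repro ssh <vm_id>
#     onefuzz debug repro rdp <vm_id>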
class DebugNode(Command):
""" Debug a specific node on a scaleset """
def rdp(self, machine_id: UUID_EXPANSION, duration: Optional[int] = 1) -> None:
node = self.onefuzz.nodes.get(machine_id)
if node.scaleset_id is None:
raise Exception("node is not part of a scaleset")
self.onefuzz.debug.scalesets.rdp(
scaleset_id=node.scaleset_id, machine_id=node.machine_id, duration=duration
)
def ssh(self, machine_id: UUID_EXPANSION, duration: Optional[int] = 1) -> None:
node = self.onefuzz.nodes.get(machine_id)
if node.scaleset_id is None:
raise Exception("node is not part of a scaleset")
self.onefuzz.debug.scalesets.ssh(
scaleset_id=node.scaleset_id, machine_id=node.machine_id, duration=duration
)
class DebugScaleset(Command):
""" Debug tasks """
def _get_proxy_setup(
self, scaleset_id: UUID, machine_id: UUID, port: int, duration: Optional[int]
) -> Tuple[bool, str, Optional[Tuple[str, int]]]:
proxy = self.onefuzz.scaleset_proxy.create(
scaleset_id, machine_id, port, duration=duration
)
if proxy.ip is None:
return (False, "waiting on proxy ip", None)
return (True, "waiting on proxy port", (proxy.ip, proxy.forward.src_port))
def rdp(
self,
scaleset_id: UUID_EXPANSION,
machine_id: UUID_EXPANSION,
duration: Optional[int] = 1,
) -> None:
(
scaleset,
machine_id_expanded,
) = self.onefuzz.scalesets._expand_scaleset_machine(
scaleset_id, machine_id, include_auth=True
)
RDP_PORT = 3389
setup = wait(
lambda: self._get_proxy_setup(
scaleset.scaleset_id, machine_id_expanded, RDP_PORT, duration
)
)
if setup is None:
raise Exception("no proxy for RDP port configured")
if scaleset.auth is None:
raise Exception("auth is not available for scaleset")
ip, port = setup
with rdp_connect(ip, scaleset.auth.password, port=port):
return
def ssh(
self,
scaleset_id: UUID_EXPANSION,
machine_id: UUID_EXPANSION,
duration: Optional[int] = 1,
command: Optional[str] = None,
) -> None:
(
scaleset,
machine_id_expanded,
) = self.onefuzz.scalesets._expand_scaleset_machine(
scaleset_id, machine_id, include_auth=True
)
SSH_PORT = 22
setup = wait(
lambda: self._get_proxy_setup(
scaleset.scaleset_id, machine_id_expanded, SSH_PORT, duration
)
)
if setup is None:
raise Exception("no proxy for SSH port configured")
ip, port = setup
if scaleset.auth is None:
raise Exception("auth is not available for scaleset")
with ssh_connect(
ip, scaleset.auth.private_key, port=port, call=True, command=command
):
return
class DebugTask(Command):
""" Debug a specific job """
def list_nodes(self, task_id: UUID_EXPANSION) -> Optional[List[NodeAssignment]]:
task = self.onefuzz.tasks.get(task_id)
return task.nodes
def _get_node(
self, task_id: UUID_EXPANSION, node_id: Optional[UUID]
) -> Tuple[UUID, UUID]:
nodes = self.list_nodes(task_id)
if not nodes:
raise Exception("task is not currently executing on nodes")
if node_id is not None:
for node in nodes:
if node.node_id == node_id and node.scaleset_id is not None:
return (node.scaleset_id, node.node_id)
raise Exception("unable to find scaleset with node_id")
for node in nodes:
if node.scaleset_id:
return (node.scaleset_id, node.node_id)
raise Exception("unable to find scaleset node running on task")
def ssh(
self,
task_id: UUID_EXPANSION,
*,
node_id: Optional[UUID] = None,
duration: Optional[int] = 1,
) -> None:
scaleset_id, node_id = self._get_node(task_id, node_id)
return self.onefuzz.debug.scalesets.ssh(scaleset_id, node_id, duration=duration)
def rdp(
self,
task_id: UUID_EXPANSION,
*,
node_id: Optional[UUID] = None,
duration: Optional[int] = 1,
) -> None:
scaleset_id, node_id = self._get_node(task_id, node_id)
return self.onefuzz.debug.scalesets.rdp(scaleset_id, node_id, duration=duration)
def libfuzzer_coverage(
self,
task_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the coverage for the specified task
:param task_id: Task ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
task = self.onefuzz.tasks.get(task_id)
query = f"where customDimensions.task_id == '{task.task_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_coverage(query, timespan, limit)
def libfuzzer_execs_sec(
self,
task_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the executions per second for the specified task
:param task_id: Task ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
task = self.onefuzz.tasks.get(task_id)
query = f"where customDimensions.task_id == '{task.task_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_execs_sec(
query, timespan, limit
)
class DebugJobTask(Command):
""" Debug a task for a specific job """
def _get_task(self, job_id: UUID_EXPANSION, task_type: TaskType) -> UUID:
for task in self.onefuzz.tasks.list(job_id=job_id):
if task.config.task.type == task_type:
return task.task_id
raise Exception(
"unable to find task type %s for job:%s" % (task_type.name, job_id)
)
def ssh(
self,
job_id: UUID_EXPANSION,
task_type: TaskType,
*,
duration: Optional[int] = 1,
) -> None:
""" SSH into the first node running the specified task type in the job """
return self.onefuzz.debug.task.ssh(
self._get_task(job_id, task_type), duration=duration
)
def rdp(
self,
job_id: UUID_EXPANSION,
task_type: TaskType,
*,
duration: Optional[int] = 1,
) -> None:
""" RDP into the first node running the specified task type in the job """
return self.onefuzz.debug.task.rdp(
self._get_task(job_id, task_type), duration=duration
)
class DebugJob(Command):
""" Debug a specific Job """
def __init__(self, onefuzz: Any, logger: logging.Logger):
super().__init__(onefuzz, logger)
self.task = DebugJobTask(onefuzz, logger)
def libfuzzer_coverage(
self,
job_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the coverage for the specified job
:param job_id: Job ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
job = self.onefuzz.jobs.get(job_id)
query = f"where customDimensions.job_id == '{job.job_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_coverage(query, timespan, limit)
def libfuzzer_execs_sec(
self,
job_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the executions per second for the specified job
:param job_id: Job ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
job = self.onefuzz.jobs.get(job_id)
query = f"where customDimensions.job_id == '{job.job_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_execs_sec(
query, timespan, limit
)
def download_files(self, job_id: UUID_EXPANSION, output: Directory) -> None:
""" Download the containers by container type for each task in the specified job """
to_download = {}
tasks = self.onefuzz.tasks.list(job_id=job_id, state=None)
if not tasks:
raise Exception("no tasks with job_id:%s" % job_id)
for task in tasks:
for container in task.config.containers:
info = self.onefuzz.containers.get(container.name)
name = os.path.join(container.type.name, container.name)
to_download[name] = info.sas_url
for name in to_download:
outdir = os.path.join(output, name)
if not os.path.exists(outdir):
os.makedirs(outdir)
self.logger.info("downloading: %s", name)
# security note: the src for azcopy comes from the server which is
# trusted in this context, while the destination is provided by the
# user
azcopy_sync(to_download[name], outdir)
class DebugLog(Command):
def __init__(self, onefuzz: "Onefuzz", logger: logging.Logger):
self.onefuzz = onefuzz
self.logger = logger
self._client: Optional[ApplicationInsightsDataClient] = None
self._app_id: Optional[str] = None
def _convert(self, raw_data: Any) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
results = {}
for table in raw_data.tables:
result = []
for row in table.rows:
converted = {
table.columns[x].name: y
for (x, y) in enumerate(row)
if y not in [None, ""]
}
if "customDimensions" in converted:
converted["customDimensions"] = json.loads(
converted["customDimensions"]
)
result.append(converted)
results[table.name] = result
if list(results.keys()) == ["PrimaryResult"]:
return results["PrimaryResult"]
return results
def query(
self,
log_query: str,
*,
timespan: Optional[str] = DAY_TIMESPAN,
raw: bool = False,
) -> Any:
"""
Perform an Application Insights query
Queries should be well-formed Kusto queries.
Ref https://docs.microsoft.com/en-us/azure/data-explorer/kql-quick-reference
:param str log_query: Query to send to Application Insights
:param str timespan: ISO 8601 duration format
:param bool raw: Do not simplify the data result
"""
if self._app_id is None:
self._app_id = self.onefuzz.info.get().insights_appid
if self._app_id is None:
raise Exception("instance does not have an insights_appid")
if self._client is None:
creds, _ = get_azure_cli_credentials(
resource="https://api.applicationinsights.io"
)
self._client = ApplicationInsightsDataClient(creds)
self.logger.debug("query: %s", log_query)
raw_data = self._client.query.execute(
self._app_id, body=QueryBody(query=log_query, timespan=timespan)
)
if "error" in raw_data.additional_properties:
raise Exception(
"Error performing query: %s" % raw_data.additional_properties["error"]
)
if raw:
return raw_data
return self._convert(raw_data)
def _query_parts(
self, parts: List[str], *, timespan: Optional[str] = None, raw: bool = False
) -> Any:
log_query = " | ".join(parts)
return self.query(log_query, timespan=timespan, raw=raw)
def _build_keyword_query(
self, value: str, limit: Optional[int] = None, desc: bool = True
) -> List[str]:
# See https://docs.microsoft.com/en-us/azure/data-explorer/kql-quick-reference
components = ["union isfuzzy=true exceptions, traces, customEvents"]
value = value.strip()
keywords = ['* has "%s"' % (x.replace('"', '\\"')) for x in value.split(" ")]
if keywords:
components.append("where " + " and ".join(keywords))
order = "desc" if desc else "asc"
components.append(f"order by timestamp {order}")
if limit:
components.append(f"take {limit}")
return components
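# Assembled-query sketch: _build_keyword_query("agent error", limit=5) yields
# parts that _query_parts joins with " | " into a single Kusto query:
#
#     union isfuzzy=true exceptions, traces, customEvents
#     | where * has "agent" and * has "error"
#     | order by timestamp desc
#     | take 5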
def keyword(
self,
value: str,
*,
timespan: Optional[str] = DAY_TIMESPAN,
limit: Optional[int] = None,
raw: bool = False,
) -> Any:
"""
Perform an Application Insights keyword query akin to "Transaction Search"
:param str value: Keyword to query Application Insights
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
:param bool raw: Do not simplify the data result
"""
components = self._build_keyword_query(value, limit=limit)
return self._query_parts(components, timespan=timespan, raw=raw)
def tail(
self,
value: str,
*,
limit: int = 1000,
indent: Optional[int] = None,
filter: Optional[str] = "[message, name, customDimensions]",
timespan: Optional[str] = HOUR_TIMESPAN,
) -> None:
"""
        Stream the results of an Application Insights keyword query akin to "Transaction Search"
:param str value: Keyword to query Application Insights
        :param int indent: Specify indent for JSON printing
        :param int limit: Limit the number of records to return in each query
        :param str filter: JMESPath filter for streaming results
        :param str timespan: ISO 8601 duration format
"""
expression = None
if filter:
expression = jmespath.compile(filter)
base_query = self._build_keyword_query(value, limit=limit, desc=False)
last_seen: Optional[str] = None
wait = DEFAULT_TAIL_DELAY
while True:
query = base_query.copy()
if last_seen is not None:
query.append(f'where timestamp > datetime("{last_seen}")')
results = self._query_parts(query, timespan=timespan)
if results:
last_seen = results[-1]["timestamp"]
for entry in results:
if expression is not None:
entry = expression.search(entry)
if entry:
print(json.dumps(entry, indent=indent, sort_keys=True))
wait = DEFAULT_TAIL_DELAY
else:
self.onefuzz.logger.debug("waiting %f seconds", wait)
time.sleep(wait)
if wait < 60:
wait *= 1.5
def _query_libfuzzer_coverage(
self, query: str, timespan: str, limit: Optional[int] = None
) -> Any:
project_fields = [
"rate=customDimensions.rate",
"covered=customDimensions.covered",
"features=customDimensions.features",
"timestamp",
]
query_parts = [
"customEvents",
"where name == 'coverage_data'",
query,
"order by timestamp desc",
f"project {",".join(project_fields)}",
]
if limit:
query_parts.append(f"take {limit}")
return self.onefuzz.debug.logs._query_parts(query_parts, timespan=timespan)
def _query_libfuzzer_execs_sec(
self,
query: str,
timespan: str,
limit: Optional[int] = None,
) -> Any:
project_fields = [
"machine_id=customDimensions.machine_id",
"worker_id=customDimensions.worker_id",
"execs_sec=customDimensions.execs_sec",
"timestamp",
]
query_parts = [
"customEvents",
"where name == 'runtime_stats'",
query,
"where customDimensions.execs_sec > 0",
"order by timestamp desc",
f"project {",".join(project_fields)}",
]
if limit:
query_parts.append(f"take {limit}")
return self.onefuzz.debug.logs._query_parts(query_parts, timespan=timespan)
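# Hedged usage sketch (editor's addition, not part of the original module):
# driving DebugLog against a live instance. The client construction, the
# keyword text, and the JMESPath filter below are illustrative assumptions.
def _example_debug_logs() -> None:
    from onefuzz.api import Onefuzz
    o = Onefuzz()
    # up to five records mentioning "panic" from the last hour
    for entry in o.debug.logs.keyword("panic", timespan="PT1H", limit=5):
        print(entry.get("message"))
    # stream matching records indefinitely (Ctrl-C to stop), projecting a
    # couple of fields via JMESPath
    o.debug.logs.tail("panic", filter="[message, customDimensions]")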
class DebugNotification(Command):
""" Debug notification integrations """
def _get_container(
self, task: Task, container_type: ContainerType
) -> Optional[Container]:
for container in task.config.containers:
if container.type == container_type:
return container.name
return None
def _get_storage_account(self, container_name: Container) -> str:
sas_url = self.onefuzz.containers.get(container_name).sas_url
_, netloc, _, _, _, _ = urlparse(sas_url)
return netloc.split(".")[0]
def job(
self,
job_id: str,
*,
report_container_type: ContainerType = ContainerType.unique_reports,
crash_name: str = "fake-crash-sample",
) -> None:
""" Inject a report into the first crash reporting task in the specified job """
tasks = self.onefuzz.tasks.list(job_id=job_id, state=[])
for task in tasks:
if task.config.task.type in [
TaskType.libfuzzer_crash_report,
TaskType.generic_crash_report,
]:
self.task(
str(task.task_id),
report_container_type=report_container_type,
crash_name=crash_name,
)
return
raise Exception("no crash reporting tasks configured")
def task(
self,
task_id: str,
*,
report_container_type: ContainerType = ContainerType.unique_reports,
crash_name: str = "fake-crash-sample",
) -> None:
""" Inject a report into the specified crash reporting task """
task = self.onefuzz.tasks.get(task_id)
crashes = self._get_container(task, ContainerType.crashes)
reports = self._get_container(task, report_container_type)
if crashes is None:
raise Exception("task does not have a crashes container")
if reports is None:
raise Exception(
"task does not have a %s container" % report_container_type.name
)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, crash_name)
with open(file_path, "w") as handle:
handle.write("")
self.onefuzz.containers.files.upload_file(crashes, file_path, crash_name)
report = Report(
input_blob=BlobRef(
account=self._get_storage_account(crashes),
container=crashes,
name=crash_name,
),
executable=task.config.task.target_exe,
crash_type="fake crash report",
crash_site="fake crash site",
call_stack=["#0 fake", "#1 call", "#2 stack"],
call_stack_sha256=ZERO_SHA256,
input_sha256=EMPTY_SHA256,
asan_log="fake asan log",
task_id=task_id,
job_id=task.job_id,
)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "report.json")
with open(file_path, "w") as handle:
handle.write(report.json())
self.onefuzz.containers.files.upload_file(
reports, file_path, crash_name + ".json"
)
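# Hedged usage sketch (editor's addition): exercising the notification path
# end to end. The job id below is a placeholder, not a real job.
def _example_fake_report() -> None:
    from onefuzz.api import Onefuzz
    o = Onefuzz()
    # uploads an empty crash input plus a fake report.json into the first
    # crash-reporting task of the job (see DebugNotification.job above)
    o.debug.notification.job("00000000-0000-0000-0000-000000000000")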
class Debug(Command):
""" Debug running jobs """
def __init__(self, onefuzz: Any, logger: logging.Logger):
super().__init__(onefuzz, logger)
self.scalesets = DebugScaleset(onefuzz, logger)
self.repro = DebugRepro(onefuzz, logger)
self.job = DebugJob(onefuzz, logger)
self.notification = DebugNotification(onefuzz, logger)
self.task = DebugTask(onefuzz, logger)
self.logs = DebugLog(onefuzz, logger)
self.node = DebugNode(onefuzz, logger)
| #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import logging
import os
import tempfile
import time
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from uuid import UUID
import jmespath
from azure.applicationinsights import ApplicationInsightsDataClient
from azure.applicationinsights.models import QueryBody
from azure.common.client_factory import get_azure_cli_credentials
from onefuzztypes.enums import ContainerType, TaskType
from onefuzztypes.models import BlobRef, NodeAssignment, Report, Task
from onefuzztypes.primitives import Container, Directory
from onefuzz.api import UUID_EXPANSION, Command, Onefuzz
from .azcopy import azcopy_sync
from .backend import wait
from .rdp import rdp_connect
from .ssh import ssh_connect
EMPTY_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
ZERO_SHA256 = "0" * len(EMPTY_SHA256)
DAY_TIMESPAN = "PT24H"
HOUR_TIMESPAN = "PT1H"
DEFAULT_TAIL_DELAY = 10.0
class DebugRepro(Command):
""" Debug repro instances """
def _disambiguate(self, vm_id: UUID_EXPANSION) -> str:
return str(
self.onefuzz.repro._disambiguate_uuid(
"vm_id",
vm_id,
lambda: [str(x.vm_id) for x in self.onefuzz.repro.list()],
)
)
def _info(self) -> Tuple[str, str]:
info = self.onefuzz.info.get()
return info.resource_group, info.subscription
def ssh(self, vm_id: str) -> None:
vm_id = self._disambiguate(vm_id)
repro = self.onefuzz.repro.get(vm_id)
if repro.ip is None:
raise Exception("missing IP: %s" % repro)
if repro.auth is None:
raise Exception("missing Auth: %s" % repro)
with ssh_connect(repro.ip, repro.auth.private_key, call=True):
pass
def rdp(self, vm_id: str) -> None:
vm_id = self._disambiguate(vm_id)
repro = self.onefuzz.repro.get(vm_id)
if repro.ip is None:
raise Exception("missing IP: %s" % repro)
if repro.auth is None:
raise Exception("missing Auth: %s" % repro)
RDP_PORT = 3389
with rdp_connect(repro.ip, repro.auth.password, port=RDP_PORT):
return
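# Hedged usage sketch (editor's addition): opening an SSH session to a repro
# VM. The vm id is a placeholder; it is resolved via _disambiguate above.
def _example_repro_ssh() -> None:
    from onefuzz.api import Onefuzz
    Onefuzz().debug.repro.ssh("00000000-0000-0000-0000-000000000000")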
class DebugNode(Command):
""" Debug a specific node on a scaleset """
def rdp(self, machine_id: UUID_EXPANSION, duration: Optional[int] = 1) -> None:
node = self.onefuzz.nodes.get(machine_id)
if node.scaleset_id is None:
raise Exception("node is not part of a scaleset")
self.onefuzz.debug.scalesets.rdp(
scaleset_id=node.scaleset_id, machine_id=node.machine_id, duration=duration
)
def ssh(self, machine_id: UUID_EXPANSION, duration: Optional[int] = 1) -> None:
node = self.onefuzz.nodes.get(machine_id)
if node.scaleset_id is None:
raise Exception("node is not part of a scaleset")
self.onefuzz.debug.scalesets.ssh(
scaleset_id=node.scaleset_id, machine_id=node.machine_id, duration=duration
)
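# Hedged usage sketch (editor's addition): SSH into a scaleset node by its
# machine id; DebugNode raises if the node is not part of a scaleset. The id
# is a placeholder.
def _example_node_ssh() -> None:
    from onefuzz.api import Onefuzz
    Onefuzz().debug.node.ssh("00000000-0000-0000-0000-000000000000", duration=2)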
class DebugScaleset(Command):
""" Debug tasks """
def _get_proxy_setup(
self, scaleset_id: UUID, machine_id: UUID, port: int, duration: Optional[int]
) -> Tuple[bool, str, Optional[Tuple[str, int]]]:
proxy = self.onefuzz.scaleset_proxy.create(
scaleset_id, machine_id, port, duration=duration
)
if proxy.ip is None:
return (False, "waiting on proxy ip", None)
return (True, "waiting on proxy port", (proxy.ip, proxy.forward.src_port))
def rdp(
self,
scaleset_id: UUID_EXPANSION,
machine_id: UUID_EXPANSION,
duration: Optional[int] = 1,
) -> None:
(
scaleset,
machine_id_expanded,
) = self.onefuzz.scalesets._expand_scaleset_machine(
scaleset_id, machine_id, include_auth=True
)
RDP_PORT = 3389
setup = wait(
lambda: self._get_proxy_setup(
scaleset.scaleset_id, machine_id_expanded, RDP_PORT, duration
)
)
if setup is None:
raise Exception("no proxy for RDP port configured")
if scaleset.auth is None:
raise Exception("auth is not available for scaleset")
ip, port = setup
with rdp_connect(ip, scaleset.auth.password, port=port):
return
def ssh(
self,
scaleset_id: UUID_EXPANSION,
machine_id: UUID_EXPANSION,
duration: Optional[int] = 1,
command: Optional[str] = None,
) -> None:
(
scaleset,
machine_id_expanded,
) = self.onefuzz.scalesets._expand_scaleset_machine(
scaleset_id, machine_id, include_auth=True
)
SSH_PORT = 22
setup = wait(
lambda: self._get_proxy_setup(
scaleset.scaleset_id, machine_id_expanded, SSH_PORT, duration
)
)
if setup is None:
raise Exception("no proxy for SSH port configured")
ip, port = setup
if scaleset.auth is None:
raise Exception("auth is not available for scaleset")
with ssh_connect(
ip, scaleset.auth.private_key, port=port, call=True, command=command
):
return
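# Hedged usage sketch (editor's addition): running a one-shot command over the
# scaleset SSH proxy. Both ids and the command are placeholders; `command` is
# forwarded to ssh_connect.
def _example_scaleset_command() -> None:
    from onefuzz.api import Onefuzz
    Onefuzz().debug.scalesets.ssh(
        "00000000-0000-0000-0000-000000000000",  # scaleset_id
        "11111111-1111-1111-1111-111111111111",  # machine_id
        command="tail -n 50 /var/log/syslog",
    )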
class DebugTask(Command):
""" Debug a specific job """
def list_nodes(self, task_id: UUID_EXPANSION) -> Optional[List[NodeAssignment]]:
task = self.onefuzz.tasks.get(task_id)
return task.nodes
def _get_node(
self, task_id: UUID_EXPANSION, node_id: Optional[UUID]
) -> Tuple[UUID, UUID]:
nodes = self.list_nodes(task_id)
if not nodes:
raise Exception("task is not currently executing on nodes")
if node_id is not None:
for node in nodes:
if node.node_id == node_id and node.scaleset_id is not None:
return (node.scaleset_id, node.node_id)
raise Exception("unable to find scaleset with node_id")
for node in nodes:
if node.scaleset_id:
return (node.scaleset_id, node.node_id)
raise Exception("unable to find scaleset node running on task")
def ssh(
self,
task_id: UUID_EXPANSION,
*,
node_id: Optional[UUID] = None,
duration: Optional[int] = 1,
) -> None:
scaleset_id, node_id = self._get_node(task_id, node_id)
return self.onefuzz.debug.scalesets.ssh(scaleset_id, node_id, duration=duration)
def rdp(
self,
task_id: UUID_EXPANSION,
*,
node_id: Optional[UUID] = None,
duration: Optional[int] = 1,
) -> None:
scaleset_id, node_id = self._get_node(task_id, node_id)
return self.onefuzz.debug.scalesets.rdp(scaleset_id, node_id, duration=duration)
def libfuzzer_coverage(
self,
task_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the coverage for the specified task
        :param str task_id: Task ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
task = self.onefuzz.tasks.get(task_id)
query = f"where customDimensions.task_id == '{task.task_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_coverage(query, timespan, limit)
def libfuzzer_execs_sec(
self,
task_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the executions per second for the specified task
        :param str task_id: Task ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
task = self.onefuzz.tasks.get(task_id)
query = f"where customDimensions.task_id == '{task.task_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_execs_sec(
query, timespan, limit
)
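# Hedged usage sketch (editor's addition): fetching recent coverage records
# for one task. The task id is a placeholder; rows carry the fields projected
# in DebugLog._query_libfuzzer_coverage (rate, covered, features, timestamp).
def _example_task_coverage() -> None:
    from onefuzz.api import Onefuzz
    rows = Onefuzz().debug.task.libfuzzer_coverage(
        "00000000-0000-0000-0000-000000000000", timespan="PT24H", limit=10
    )
    print(rows)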
class DebugJobTask(Command):
""" Debug a task for a specific job """
def _get_task(self, job_id: UUID_EXPANSION, task_type: TaskType) -> UUID:
for task in self.onefuzz.tasks.list(job_id=job_id):
if task.config.task.type == task_type:
return task.task_id
raise Exception(
"unable to find task type %s for job:%s" % (task_type.name, job_id)
)
def ssh(
self,
job_id: UUID_EXPANSION,
task_type: TaskType,
*,
duration: Optional[int] = 1,
) -> None:
""" SSH into the first node running the specified task type in the job """
return self.onefuzz.debug.task.ssh(
self._get_task(job_id, task_type), duration=duration
)
def rdp(
self,
job_id: UUID_EXPANSION,
task_type: TaskType,
*,
duration: Optional[int] = 1,
) -> None:
""" RDP into the first node running the specified task type in the job """
return self.onefuzz.debug.task.rdp(
self._get_task(job_id, task_type), duration=duration
)
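# Hedged usage sketch (editor's addition): SSH into whichever node runs a
# given task type within a job. The job id is a placeholder, and
# TaskType.libfuzzer_fuzz is assumed here purely for illustration.
def _example_job_task_ssh() -> None:
    from onefuzz.api import Onefuzz
    from onefuzztypes.enums import TaskType
    Onefuzz().debug.job.task.ssh(
        "00000000-0000-0000-0000-000000000000", TaskType.libfuzzer_fuzz
    )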
class DebugJob(Command):
""" Debug a specific Job """
def __init__(self, onefuzz: Any, logger: logging.Logger):
super().__init__(onefuzz, logger)
self.task = DebugJobTask(onefuzz, logger)
def libfuzzer_coverage(
self,
job_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the coverage for the specified job
        :param str job_id: Job ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
job = self.onefuzz.jobs.get(job_id)
query = f"where customDimensions.job_id == '{job.job_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_coverage(query, timespan, limit)
def libfuzzer_execs_sec(
self,
job_id: UUID_EXPANSION,
timespan: str = DAY_TIMESPAN,
limit: Optional[int] = None,
) -> Any:
"""
Get the executions per second for the specified job
        :param str job_id: Job ID
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
"""
job = self.onefuzz.jobs.get(job_id)
query = f"where customDimensions.job_id == '{job.job_id}'"
return self.onefuzz.debug.logs._query_libfuzzer_execs_sec(
query, timespan, limit
)
def download_files(self, job_id: UUID_EXPANSION, output: Directory) -> None:
""" Download the containers by container type for each task in the specified job """
to_download = {}
tasks = self.onefuzz.tasks.list(job_id=job_id, state=None)
if not tasks:
raise Exception("no tasks with job_id:%s" % job_id)
for task in tasks:
for container in task.config.containers:
info = self.onefuzz.containers.get(container.name)
name = os.path.join(container.type.name, container.name)
to_download[name] = info.sas_url
for name in to_download:
outdir = os.path.join(output, name)
if not os.path.exists(outdir):
os.makedirs(outdir)
self.logger.info("downloading: %s", name)
# security note: the src for azcopy comes from the server which is
# trusted in this context, while the destination is provided by the
# user
azcopy_sync(to_download[name], outdir)
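# Hedged usage sketch (editor's addition): mirroring every container of a job
# into ./job-output, grouped by container type as download_files does above.
# The job id and the output directory are placeholders.
def _example_download_job() -> None:
    from onefuzz.api import Onefuzz
    from onefuzztypes.primitives import Directory
    Onefuzz().debug.job.download_files(
        "00000000-0000-0000-0000-000000000000", Directory("job-output")
    )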
class DebugLog(Command):
def __init__(self, onefuzz: "Onefuzz", logger: logging.Logger):
self.onefuzz = onefuzz
self.logger = logger
self._client: Optional[ApplicationInsightsDataClient] = None
self._app_id: Optional[str] = None
def _convert(self, raw_data: Any) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
results = {}
for table in raw_data.tables:
result = []
for row in table.rows:
converted = {
table.columns[x].name: y
for (x, y) in enumerate(row)
if y not in [None, ""]
}
if "customDimensions" in converted:
converted["customDimensions"] = json.loads(
converted["customDimensions"]
)
result.append(converted)
results[table.name] = result
if list(results.keys()) == ["PrimaryResult"]:
return results["PrimaryResult"]
return results
def query(
self,
log_query: str,
*,
timespan: Optional[str] = DAY_TIMESPAN,
raw: bool = False,
) -> Any:
"""
Perform an Application Insights query
        Queries should be well-formed Kusto queries.
Ref https://docs.microsoft.com/en-us/azure/data-explorer/kql-quick-reference
:param str log_query: Query to send to Application Insights
:param str timespan: ISO 8601 duration format
:param bool raw: Do not simplify the data result
"""
if self._app_id is None:
self._app_id = self.onefuzz.info.get().insights_appid
if self._app_id is None:
raise Exception("instance does not have an insights_appid")
if self._client is None:
creds, _ = get_azure_cli_credentials(
resource="https://api.applicationinsights.io"
)
self._client = ApplicationInsightsDataClient(creds)
self.logger.debug("query: %s", log_query)
raw_data = self._client.query.execute(
self._app_id, body=QueryBody(query=log_query, timespan=timespan)
)
if "error" in raw_data.additional_properties:
raise Exception(
"Error performing query: %s" % raw_data.additional_properties["error"]
)
if raw:
return raw_data
return self._convert(raw_data)
def _query_parts(
self, parts: List[str], *, timespan: Optional[str] = None, raw: bool = False
) -> Any:
log_query = " | ".join(parts)
return self.query(log_query, timespan=timespan, raw=raw)
def _build_keyword_query(
self, value: str, limit: Optional[int] = None, desc: bool = True
) -> List[str]:
# See https://docs.microsoft.com/en-us/azure/data-explorer/kql-quick-reference
components = ["union isfuzzy=true exceptions, traces, customEvents"]
value = value.strip()
keywords = ['* has "%s"' % (x.replace('"', '\\"')) for x in value.split(" ")]
if keywords:
components.append("where " + " and ".join(keywords))
order = "desc" if desc else "asc"
if limit:
components.append(f"take {limit}")
components.append(f"order by timestamp {order}")
return components
def keyword(
self,
value: str,
*,
timespan: Optional[str] = DAY_TIMESPAN,
limit: Optional[int] = None,
raw: bool = False,
) -> Any:
"""
Perform an Application Insights keyword query akin to "Transaction Search"
:param str value: Keyword to query Application Insights
:param str timespan: ISO 8601 duration format
:param int limit: Limit the number of records returned
:param bool raw: Do not simplify the data result
"""
components = self._build_keyword_query(value, limit=limit)
return self._query_parts(components, timespan=timespan, raw=raw)
def tail(
self,
value: str,
*,
limit: int = 1000,
indent: Optional[int] = None,
filter: Optional[str] = "[message, name, customDimensions]",
timespan: Optional[str] = HOUR_TIMESPAN,
) -> None:
"""
        Stream the results of an Application Insights keyword query akin to "Transaction Search"
:param str value: Keyword to query Application Insights
        :param int indent: Specify indent for JSON printing
        :param int limit: Limit the number of records to return in each query
        :param str filter: JMESPath filter for streaming results
        :param str timespan: ISO 8601 duration format
"""
expression = None
if filter:
expression = jmespath.compile(filter)
base_query = self._build_keyword_query(value, limit=limit, desc=False)
last_seen: Optional[str] = None
wait = DEFAULT_TAIL_DELAY
while True:
query = base_query.copy()
if last_seen is not None:
query.append(f'where timestamp > datetime("{last_seen}")')
results = self._query_parts(query, timespan=timespan)
if results:
last_seen = results[-1]["timestamp"]
for entry in results:
if expression is not None:
entry = expression.search(entry)
if entry:
print(json.dumps(entry, indent=indent, sort_keys=True))
wait = DEFAULT_TAIL_DELAY
else:
self.onefuzz.logger.debug("waiting %f seconds", wait)
time.sleep(wait)
if wait < 60:
wait *= 1.5
def _query_libfuzzer_coverage(
self, query: str, timespan: str, limit: Optional[int] = None
) -> Any:
project_fields = [
"rate=customDimensions.rate",
"covered=customDimensions.covered",
"features=customDimensions.features",
"timestamp",
]
query_parts = [
"customEvents",
"where name == 'coverage_data'",
query,
"order by timestamp desc",
f"project {','.join(project_fields)}",
]
if limit:
query_parts.append(f"take {limit}")
return self.onefuzz.debug.logs._query_parts(query_parts, timespan=timespan)
def _query_libfuzzer_execs_sec(
self,
query: str,
timespan: str,
limit: Optional[int] = None,
) -> Any:
project_fields = [
"machine_id=customDimensions.machine_id",
"worker_id=customDimensions.worker_id",
"execs_sec=customDimensions.execs_sec",
"timestamp",
]
query_parts = [
"customEvents",
"where name == 'runtime_stats'",
query,
"where customDimensions.execs_sec > 0",
"order by timestamp desc",
f"project {','.join(project_fields)}",
]
if limit:
query_parts.append(f"take {limit}")
return self.onefuzz.debug.logs._query_parts(query_parts, timespan=timespan)
class DebugNotification(Command):
""" Debug notification integrations """
def _get_container(
self, task: Task, container_type: ContainerType
) -> Optional[Container]:
for container in task.config.containers:
if container.type == container_type:
return container.name
return None
def _get_storage_account(self, container_name: Container) -> str:
sas_url = self.onefuzz.containers.get(container_name).sas_url
_, netloc, _, _, _, _ = urlparse(sas_url)
return netloc.split(".")[0]
def job(
self,
job_id: str,
*,
report_container_type: ContainerType = ContainerType.unique_reports,
crash_name: str = "fake-crash-sample",
) -> None:
""" Inject a report into the first crash reporting task in the specified job """
tasks = self.onefuzz.tasks.list(job_id=job_id, state=[])
for task in tasks:
if task.config.task.type in [
TaskType.libfuzzer_crash_report,
TaskType.generic_crash_report,
]:
self.task(
str(task.task_id),
report_container_type=report_container_type,
crash_name=crash_name,
)
return
raise Exception("no crash reporting tasks configured")
def task(
self,
task_id: str,
*,
report_container_type: ContainerType = ContainerType.unique_reports,
crash_name: str = "fake-crash-sample",
) -> None:
""" Inject a report into the specified crash reporting task """
task = self.onefuzz.tasks.get(task_id)
crashes = self._get_container(task, ContainerType.crashes)
reports = self._get_container(task, report_container_type)
if crashes is None:
raise Exception("task does not have a crashes container")
if reports is None:
raise Exception(
"task does not have a %s container" % report_container_type.name
)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, crash_name)
with open(file_path, "w") as handle:
handle.write("")
self.onefuzz.containers.files.upload_file(crashes, file_path, crash_name)
report = Report(
input_blob=BlobRef(
account=self._get_storage_account(crashes),
container=crashes,
name=crash_name,
),
executable=task.config.task.target_exe,
crash_type="fake crash report",
crash_site="fake crash site",
call_stack=["#0 fake", "#1 call", "#2 stack"],
call_stack_sha256=ZERO_SHA256,
input_sha256=EMPTY_SHA256,
asan_log="fake asan log",
task_id=task_id,
job_id=task.job_id,
)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "report.json")
with open(file_path, "w") as handle:
handle.write(report.json())
self.onefuzz.containers.files.upload_file(
reports, file_path, crash_name + ".json"
)
class Debug(Command):
""" Debug running jobs """
def __init__(self, onefuzz: Any, logger: logging.Logger):
super().__init__(onefuzz, logger)
self.scalesets = DebugScaleset(onefuzz, logger)
self.repro = DebugRepro(onefuzz, logger)
self.job = DebugJob(onefuzz, logger)
self.notification = DebugNotification(onefuzz, logger)
self.task = DebugTask(onefuzz, logger)
self.logs = DebugLog(onefuzz, logger)
self.node = DebugNode(onefuzz, logger)
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Akumatic
#
# https://adventofcode.com/2019/day/5
import sys, os
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import intcode, intcode_test
def readFile() -> list:
with open(f"{__file__.rstrip("code.py")}input.txt", "r") as f:
return [int(num) for num in f.readline().split(",")]
def part1(pc: intcode.Computer) -> int:
pc.reset(input=1)
pc.run()
return pc.data[0]
def part2(pc: intcode.Computer) -> int:
pc.reset(input=5)
pc.run()
return pc.data[0]
if __name__ == "__main__":
intcode_test.test_05()
pc = intcode.Computer(readFile())
print(f"Part 1: {part1(pc)}")
print(f"Part 2: {part2(pc)}") | # SPDX-License-Identifier: MIT
# Copyright (c) 2019 Akumatic
#
# https://adventofcode.com/2019/day/5
import sys, os
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import intcode, intcode_test
def readFile() -> list:
with open(f"{__file__.rstrip('code.py')}input.txt", "r") as f:
return [int(num) for num in f.readline().split(",")]
def part1(pc: intcode.Computer) -> int:
pc.reset(input=1)
pc.run()
return pc.data[0]
def part2(pc: intcode.Computer) -> int:
pc.reset(input=5)
pc.run()
return pc.data[0]
if __name__ == "__main__":
intcode_test.test_05()
pc = intcode.Computer(readFile())
print(f"Part 1: {part1(pc)}")
print(f"Part 2: {part2(pc)}") |
from typing import Union, Dict, Optional, Any, IO, TYPE_CHECKING
from thinc.api import Config, fix_random_seed, set_gpu_allocator
from thinc.api import ConfigValidationError
from pathlib import Path
import srsly
import numpy
import tarfile
import gzip
import zipfile
import tqdm
from .pretrain import get_tok2vec_ref
from ..lookups import Lookups
from ..vectors import Vectors
from ..errors import Errors, Warnings
from ..schemas import ConfigSchemaTraining
from ..util import registry, load_model_from_config, resolve_dot_names, logger
from ..util import load_model, ensure_path, get_sourced_components
from ..util import OOV_RANK, DEFAULT_OOV_PROB
if TYPE_CHECKING:
from ..language import Language # noqa: F401
def init_nlp(config: Config, *, use_gpu: int = -1) -> "Language":
raw_config = config
config = raw_config.interpolate()
if "seed" not in config["training"]:
raise ValueError(Errors.E1015.format(value="[training] seed"))
if "gpu_allocator" not in config["training"]:
raise ValueError(Errors.E1015.format(value="[training] gpu_allocator"))
if config["training"]["seed"] is not None:
fix_random_seed(config["training"]["seed"])
allocator = config["training"]["gpu_allocator"]
if use_gpu >= 0 and allocator:
set_gpu_allocator(allocator)
# Use original config here before it's resolved to functions
sourced = get_sourced_components(config)
nlp = load_model_from_config(raw_config, auto_fill=True)
logger.info("Set up nlp object from config")
config = nlp.config.interpolate()
# Resolve all training-relevant sections using the filled nlp config
T = registry.resolve(config["training"], schema=ConfigSchemaTraining)
dot_names = [T["train_corpus"], T["dev_corpus"]]
if not isinstance(T["train_corpus"], str):
raise ConfigValidationError(
desc=Errors.E897.format(
field="training.train_corpus", type=type(T["train_corpus"])
)
)
if not isinstance(T["dev_corpus"], str):
raise ConfigValidationError(
desc=Errors.E897.format(
field="training.dev_corpus", type=type(T["dev_corpus"])
)
)
train_corpus, dev_corpus = resolve_dot_names(config, dot_names)
optimizer = T["optimizer"]
# Components that shouldn't be updated during training
frozen_components = T["frozen_components"]
# Sourced components that require resume_training
resume_components = [p for p in sourced if p not in frozen_components]
logger.info(f"Pipeline: {nlp.pipe_names}")
if resume_components:
with nlp.select_pipes(enable=resume_components):
logger.info(f"Resuming training for: {resume_components}")
nlp.resume_training(sgd=optimizer)
# Make sure that listeners are defined before initializing further
nlp._link_components()
with nlp.select_pipes(disable=[*frozen_components, *resume_components]):
nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer)
logger.info(f"Initialized pipeline components: {nlp.pipe_names}")
# Detect components with listeners that are not frozen consistently
for name, proc in nlp.pipeline:
if getattr(proc, "listening_components", None): # e.g. tok2vec/transformer
for listener in proc.listening_components:
if listener in frozen_components and name not in frozen_components:
logger.warning(Warnings.W087.format(name=name, listener=listener))
# We always check this regardless, in case user freezes tok2vec
if listener not in frozen_components and name in frozen_components:
logger.warning(Warnings.W086.format(name=name, listener=listener))
return nlp
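# Hedged usage sketch (editor's addition): driving init_nlp from a config file
# on disk. The path is a placeholder; in practice the file comes from
# `spacy init config` and must provide the [training] keys checked above
# (seed, gpu_allocator, train_corpus, dev_corpus, ...).
def _example_init_nlp() -> None:
    from thinc.api import Config
    config = Config().from_disk("config.cfg")
    nlp = init_nlp(config, use_gpu=-1)  # -1 selects CPU
    print(nlp.pipe_names)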
def init_vocab(
nlp: "Language",
*,
data: Optional[Path] = None,
lookups: Optional[Lookups] = None,
vectors: Optional[str] = None,
) -> "Language":
if lookups:
nlp.vocab.lookups = lookups
logger.info(f"Added vocab lookups: {", ".join(lookups.tables)}")
data_path = ensure_path(data)
if data_path is not None:
lex_attrs = srsly.read_jsonl(data_path)
for lexeme in nlp.vocab:
lexeme.rank = OOV_RANK
for attrs in lex_attrs:
if "settings" in attrs:
continue
lexeme = nlp.vocab[attrs["orth"]]
lexeme.set_attrs(**attrs)
if len(nlp.vocab):
oov_prob = min(lex.prob for lex in nlp.vocab) - 1
else:
oov_prob = DEFAULT_OOV_PROB
nlp.vocab.cfg.update({"oov_prob": oov_prob})
logger.info(f"Added {len(nlp.vocab)} lexical entries to the vocab")
logger.info("Created vocabulary")
if vectors is not None:
load_vectors_into_model(nlp, vectors)
logger.info(f"Added vectors: {vectors}")
logger.info("Finished initializing nlp object")
def load_vectors_into_model(
nlp: "Language", name: Union[str, Path], *, add_strings: bool = True
) -> None:
"""Load word vectors from an installed model or path into a model instance."""
try:
vectors_nlp = load_model(name)
except ConfigValidationError as e:
title = f"Config validation error for vectors {name}"
desc = (
"This typically means that there's a problem in the config.cfg included "
"with the packaged vectors. Make sure that the vectors package you're "
"loading is compatible with the current version of spaCy."
)
err = ConfigValidationError.from_error(e, title=title, desc=desc)
raise err from None
nlp.vocab.vectors = vectors_nlp.vocab.vectors
if add_strings:
# I guess we should add the strings from the vectors_nlp model?
# E.g. if someone does a similarity query, they might expect the strings.
for key in nlp.vocab.vectors.key2row:
if key in vectors_nlp.vocab.strings:
nlp.vocab.strings.add(vectors_nlp.vocab.strings[key])
def init_tok2vec(
nlp: "Language", pretrain_config: Dict[str, Any], init_config: Dict[str, Any]
) -> bool:
# Load pretrained tok2vec weights - cf. CLI command 'pretrain'
P = pretrain_config
I = init_config
weights_data = None
init_tok2vec = ensure_path(I["init_tok2vec"])
if init_tok2vec is not None:
if not init_tok2vec.exists():
err = f"can't find pretrained tok2vec: {init_tok2vec}"
errors = [{"loc": ["initialize", "init_tok2vec"], "msg": err}]
raise ConfigValidationError(config=nlp.config, errors=errors)
with init_tok2vec.open("rb") as file_:
weights_data = file_.read()
if weights_data is not None:
layer = get_tok2vec_ref(nlp, P)
layer.from_bytes(weights_data)
logger.info(f"Loaded pretrained weights from {init_tok2vec}")
return True
return False
def convert_vectors(
nlp: "Language",
vectors_loc: Optional[Path],
*,
truncate: int,
prune: int,
name: Optional[str] = None,
) -> None:
vectors_loc = ensure_path(vectors_loc)
if vectors_loc and vectors_loc.parts[-1].endswith(".npz"):
nlp.vocab.vectors = Vectors(data=numpy.load(vectors_loc.open("rb")))
for lex in nlp.vocab:
if lex.rank and lex.rank != OOV_RANK:
nlp.vocab.vectors.add(lex.orth, row=lex.rank)
else:
if vectors_loc:
logger.info(f"Reading vectors from {vectors_loc}")
vectors_data, vector_keys = read_vectors(vectors_loc, truncate)
logger.info(f"Loaded vectors from {vectors_loc}")
else:
vectors_data, vector_keys = (None, None)
if vector_keys is not None:
for word in vector_keys:
if word not in nlp.vocab:
nlp.vocab[word]
if vectors_data is not None:
nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)
if name is None:
# TODO: Is this correct? Does this matter?
nlp.vocab.vectors.name = f"{nlp.meta["lang"]}_{nlp.meta["name"]}.vectors"
else:
nlp.vocab.vectors.name = name
nlp.meta["vectors"]["name"] = nlp.vocab.vectors.name
if prune >= 1:
nlp.vocab.prune_vectors(prune)
def read_vectors(vectors_loc: Path, truncate_vectors: int):
f = ensure_shape(vectors_loc)
shape = tuple(int(size) for size in next(f).split())
if truncate_vectors >= 1:
shape = (truncate_vectors, shape[1])
vectors_data = numpy.zeros(shape=shape, dtype="f")
vectors_keys = []
for i, line in enumerate(tqdm.tqdm(f)):
line = line.rstrip()
pieces = line.rsplit(" ", vectors_data.shape[1])
word = pieces.pop(0)
if len(pieces) != vectors_data.shape[1]:
raise ValueError(Errors.E094.format(line_num=i, loc=vectors_loc))
vectors_data[i] = numpy.asarray(pieces, dtype="f")
vectors_keys.append(word)
if i == truncate_vectors - 1:
break
return vectors_data, vectors_keys
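# Hedged illustration (editor's addition): the plain-text format consumed by
# read_vectors() -- an optional "<count> <width>" header (inserted by
# ensure_shape below when missing), then one token plus <width> floats per
# line. The file name is a placeholder.
def _example_vectors_file(path: str = "vectors.txt") -> None:
    with open(path, "w", encoding="utf8") as f:
        f.write("2 3\n")                 # 2 vectors of width 3
        f.write("apple 0.1 0.2 0.3\n")
        f.write("pear 0.4 0.5 0.6\n")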
def open_file(loc: Union[str, Path]) -> IO:
"""Handle .gz, .tar.gz or unzipped files"""
loc = ensure_path(loc)
if tarfile.is_tarfile(str(loc)):
return tarfile.open(str(loc), "r:gz")
elif loc.parts[-1].endswith("gz"):
return (line.decode("utf8") for line in gzip.open(str(loc), "r"))
elif loc.parts[-1].endswith("zip"):
zip_file = zipfile.ZipFile(str(loc))
names = zip_file.namelist()
file_ = zip_file.open(names[0])
return (line.decode("utf8") for line in file_)
else:
return loc.open("r", encoding="utf8")
def ensure_shape(vectors_loc):
"""Ensure that the first line of the data is the vectors shape.
If it's not, we read in the data and output the shape as the first result,
so that the reader doesn't have to deal with the problem.
"""
lines = open_file(vectors_loc)
first_line = next(lines)
try:
shape = tuple(int(size) for size in first_line.split())
except ValueError:
shape = None
if shape is not None:
# All good, give the data
yield first_line
yield from lines
else:
# Figure out the shape, make it the first value, and then give the
# rest of the data.
width = len(first_line.split()) - 1
length = 1
for _ in lines:
length += 1
yield f"{length} {width}"
        # Read the lines in again from the file; this avoids having to store
        # all the results in an in-memory list
lines2 = open_file(vectors_loc)
yield from lines2
| from typing import Union, Dict, Optional, Any, IO, TYPE_CHECKING
from thinc.api import Config, fix_random_seed, set_gpu_allocator
from thinc.api import ConfigValidationError
from pathlib import Path
import srsly
import numpy
import tarfile
import gzip
import zipfile
import tqdm
from .pretrain import get_tok2vec_ref
from ..lookups import Lookups
from ..vectors import Vectors
from ..errors import Errors, Warnings
from ..schemas import ConfigSchemaTraining
from ..util import registry, load_model_from_config, resolve_dot_names, logger
from ..util import load_model, ensure_path, get_sourced_components
from ..util import OOV_RANK, DEFAULT_OOV_PROB
if TYPE_CHECKING:
from ..language import Language # noqa: F401
def init_nlp(config: Config, *, use_gpu: int = -1) -> "Language":
raw_config = config
config = raw_config.interpolate()
if "seed" not in config["training"]:
raise ValueError(Errors.E1015.format(value="[training] seed"))
if "gpu_allocator" not in config["training"]:
raise ValueError(Errors.E1015.format(value="[training] gpu_allocator"))
if config["training"]["seed"] is not None:
fix_random_seed(config["training"]["seed"])
allocator = config["training"]["gpu_allocator"]
if use_gpu >= 0 and allocator:
set_gpu_allocator(allocator)
# Use original config here before it's resolved to functions
sourced = get_sourced_components(config)
nlp = load_model_from_config(raw_config, auto_fill=True)
logger.info("Set up nlp object from config")
config = nlp.config.interpolate()
# Resolve all training-relevant sections using the filled nlp config
T = registry.resolve(config["training"], schema=ConfigSchemaTraining)
dot_names = [T["train_corpus"], T["dev_corpus"]]
if not isinstance(T["train_corpus"], str):
raise ConfigValidationError(
desc=Errors.E897.format(
field="training.train_corpus", type=type(T["train_corpus"])
)
)
if not isinstance(T["dev_corpus"], str):
raise ConfigValidationError(
desc=Errors.E897.format(
field="training.dev_corpus", type=type(T["dev_corpus"])
)
)
train_corpus, dev_corpus = resolve_dot_names(config, dot_names)
optimizer = T["optimizer"]
# Components that shouldn't be updated during training
frozen_components = T["frozen_components"]
# Sourced components that require resume_training
resume_components = [p for p in sourced if p not in frozen_components]
logger.info(f"Pipeline: {nlp.pipe_names}")
if resume_components:
with nlp.select_pipes(enable=resume_components):
logger.info(f"Resuming training for: {resume_components}")
nlp.resume_training(sgd=optimizer)
# Make sure that listeners are defined before initializing further
nlp._link_components()
with nlp.select_pipes(disable=[*frozen_components, *resume_components]):
nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer)
logger.info(f"Initialized pipeline components: {nlp.pipe_names}")
# Detect components with listeners that are not frozen consistently
for name, proc in nlp.pipeline:
if getattr(proc, "listening_components", None): # e.g. tok2vec/transformer
for listener in proc.listening_components:
if listener in frozen_components and name not in frozen_components:
logger.warning(Warnings.W087.format(name=name, listener=listener))
# We always check this regardless, in case user freezes tok2vec
if listener not in frozen_components and name in frozen_components:
logger.warning(Warnings.W086.format(name=name, listener=listener))
return nlp
def init_vocab(
nlp: "Language",
*,
data: Optional[Path] = None,
lookups: Optional[Lookups] = None,
vectors: Optional[str] = None,
) -> "Language":
if lookups:
nlp.vocab.lookups = lookups
logger.info(f"Added vocab lookups: {', '.join(lookups.tables)}")
data_path = ensure_path(data)
if data_path is not None:
lex_attrs = srsly.read_jsonl(data_path)
for lexeme in nlp.vocab:
lexeme.rank = OOV_RANK
for attrs in lex_attrs:
if "settings" in attrs:
continue
lexeme = nlp.vocab[attrs["orth"]]
lexeme.set_attrs(**attrs)
if len(nlp.vocab):
oov_prob = min(lex.prob for lex in nlp.vocab) - 1
else:
oov_prob = DEFAULT_OOV_PROB
nlp.vocab.cfg.update({"oov_prob": oov_prob})
logger.info(f"Added {len(nlp.vocab)} lexical entries to the vocab")
logger.info("Created vocabulary")
if vectors is not None:
load_vectors_into_model(nlp, vectors)
logger.info(f"Added vectors: {vectors}")
logger.info("Finished initializing nlp object")
def load_vectors_into_model(
nlp: "Language", name: Union[str, Path], *, add_strings: bool = True
) -> None:
"""Load word vectors from an installed model or path into a model instance."""
try:
vectors_nlp = load_model(name)
except ConfigValidationError as e:
title = f"Config validation error for vectors {name}"
desc = (
"This typically means that there's a problem in the config.cfg included "
"with the packaged vectors. Make sure that the vectors package you're "
"loading is compatible with the current version of spaCy."
)
err = ConfigValidationError.from_error(e, title=title, desc=desc)
raise err from None
nlp.vocab.vectors = vectors_nlp.vocab.vectors
if add_strings:
# I guess we should add the strings from the vectors_nlp model?
# E.g. if someone does a similarity query, they might expect the strings.
for key in nlp.vocab.vectors.key2row:
if key in vectors_nlp.vocab.strings:
nlp.vocab.strings.add(vectors_nlp.vocab.strings[key])
def init_tok2vec(
nlp: "Language", pretrain_config: Dict[str, Any], init_config: Dict[str, Any]
) -> bool:
# Load pretrained tok2vec weights - cf. CLI command 'pretrain'
P = pretrain_config
I = init_config
weights_data = None
init_tok2vec = ensure_path(I["init_tok2vec"])
if init_tok2vec is not None:
if not init_tok2vec.exists():
err = f"can't find pretrained tok2vec: {init_tok2vec}"
errors = [{"loc": ["initialize", "init_tok2vec"], "msg": err}]
raise ConfigValidationError(config=nlp.config, errors=errors)
with init_tok2vec.open("rb") as file_:
weights_data = file_.read()
if weights_data is not None:
layer = get_tok2vec_ref(nlp, P)
layer.from_bytes(weights_data)
logger.info(f"Loaded pretrained weights from {init_tok2vec}")
return True
return False
def convert_vectors(
nlp: "Language",
vectors_loc: Optional[Path],
*,
truncate: int,
prune: int,
name: Optional[str] = None,
) -> None:
vectors_loc = ensure_path(vectors_loc)
if vectors_loc and vectors_loc.parts[-1].endswith(".npz"):
nlp.vocab.vectors = Vectors(data=numpy.load(vectors_loc.open("rb")))
for lex in nlp.vocab:
if lex.rank and lex.rank != OOV_RANK:
nlp.vocab.vectors.add(lex.orth, row=lex.rank)
else:
if vectors_loc:
logger.info(f"Reading vectors from {vectors_loc}")
vectors_data, vector_keys = read_vectors(vectors_loc, truncate)
logger.info(f"Loaded vectors from {vectors_loc}")
else:
vectors_data, vector_keys = (None, None)
if vector_keys is not None:
for word in vector_keys:
if word not in nlp.vocab:
nlp.vocab[word]
if vectors_data is not None:
nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)
if name is None:
# TODO: Is this correct? Does this matter?
nlp.vocab.vectors.name = f"{nlp.meta['lang']}_{nlp.meta['name']}.vectors"
else:
nlp.vocab.vectors.name = name
nlp.meta["vectors"]["name"] = nlp.vocab.vectors.name
if prune >= 1:
nlp.vocab.prune_vectors(prune)
def read_vectors(vectors_loc: Path, truncate_vectors: int):
f = ensure_shape(vectors_loc)
shape = tuple(int(size) for size in next(f).split())
if truncate_vectors >= 1:
shape = (truncate_vectors, shape[1])
vectors_data = numpy.zeros(shape=shape, dtype="f")
vectors_keys = []
for i, line in enumerate(tqdm.tqdm(f)):
line = line.rstrip()
pieces = line.rsplit(" ", vectors_data.shape[1])
word = pieces.pop(0)
if len(pieces) != vectors_data.shape[1]:
raise ValueError(Errors.E094.format(line_num=i, loc=vectors_loc))
vectors_data[i] = numpy.asarray(pieces, dtype="f")
vectors_keys.append(word)
if i == truncate_vectors - 1:
break
return vectors_data, vectors_keys
def open_file(loc: Union[str, Path]) -> IO:
"""Handle .gz, .tar.gz or unzipped files"""
loc = ensure_path(loc)
if tarfile.is_tarfile(str(loc)):
return tarfile.open(str(loc), "r:gz")
elif loc.parts[-1].endswith("gz"):
return (line.decode("utf8") for line in gzip.open(str(loc), "r"))
elif loc.parts[-1].endswith("zip"):
zip_file = zipfile.ZipFile(str(loc))
names = zip_file.namelist()
file_ = zip_file.open(names[0])
return (line.decode("utf8") for line in file_)
else:
return loc.open("r", encoding="utf8")
def ensure_shape(vectors_loc):
"""Ensure that the first line of the data is the vectors shape.
If it's not, we read in the data and output the shape as the first result,
so that the reader doesn't have to deal with the problem.
"""
lines = open_file(vectors_loc)
first_line = next(lines)
try:
shape = tuple(int(size) for size in first_line.split())
except ValueError:
shape = None
if shape is not None:
# All good, give the data
yield first_line
yield from lines
else:
# Figure out the shape, make it the first value, and then give the
# rest of the data.
width = len(first_line.split()) - 1
length = 1
for _ in lines:
length += 1
yield f"{length} {width}"
        # Read the lines in again from the file; this avoids having to store
        # all the results in an in-memory list
lines2 = open_file(vectors_loc)
yield from lines2
|
import copy
import json
import os
import re
import warnings
from collections import OrderedDict, UserDict
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from packaging import version
from requests import HTTPError
from . import __version__
from .dynamic_module_utils import custom_object_save
from .utils import (
EntryNotFoundError,
ExplicitEnum,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
add_end_docstrings,
cached_path,
copy_func,
get_file_from_repo,
hf_bucket_url,
is_flax_available,
is_offline_mode,
is_remote_url,
is_tokenizers_available,
is_torch_available,
logging,
to_py_obj,
torch_required,
)
from .utils.generic import _is_jax, _is_numpy, _is_tensorflow, _is_torch, _is_torch_device
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tokenizers_available():
from tokenizers import AddedToken
from tokenizers import Encoding as EncodingFast
else:
@dataclass(frozen=True, eq=True)
class AddedToken:
"""
        AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
        way it should behave.
"""
content: str = field(default_factory=str)
single_word: bool = False
lstrip: bool = False
rstrip: bool = False
normalized: bool = True
def __getstate__(self):
return self.__dict__
@dataclass
class EncodingFast:
"""This is dummy class because without the `tokenizers` library we don't have these objects anyway"""
pass
logger = logging.get_logger(__name__)
# This is used to set the max input length for a model with infinite size input
VERY_LARGE_INTEGER = int(1e30)
# This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
LARGE_INTEGER = int(1e20)
# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]
# Slow tokenizers used to be saved in three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file
FULL_TOKENIZER_FILE = "tokenizer.json"
_re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json")
class TruncationStrategy(ExplicitEnum):
"""
Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
an IDE.
"""
ONLY_FIRST = "only_first"
ONLY_SECOND = "only_second"
LONGEST_FIRST = "longest_first"
DO_NOT_TRUNCATE = "do_not_truncate"
class CharSpan(NamedTuple):
"""
Character span in the original string.
Args:
start (`int`): Index of the first character in the original string.
end (`int`): Index of the character following the last character in the original string.
"""
start: int
end: int
class TokenSpan(NamedTuple):
"""
Token span in an encoded string (list of tokens).
Args:
start (`int`): Index of the first token in the span.
end (`int`): Index of the token following the last token in the span.
"""
start: int
end: int
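# Hedged illustration (editor's addition): both span types are half-open
# intervals [start, end), mirroring Python slicing; the values are made up.
_EXAMPLE_CHAR_SPAN = CharSpan(start=0, end=5)    # characters 0-4 of the text
_EXAMPLE_TOKEN_SPAN = TokenSpan(start=1, end=3)  # tokens 1 and 2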
class BatchEncoding(UserDict):
"""
Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and
    [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc.).
This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
utility methods to map from word/character space to token space.
Args:
data (`dict`):
Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods ('input_ids',
'attention_mask', etc.).
encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*):
If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character
space to token space the `tokenizers.Encoding` instance or list of instance (for batches) hold this
information.
tensor_type (`Union[None, str, TensorType]`, *optional*):
You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
initialization.
prepend_batch_axis (`bool`, *optional*, defaults to `False`):
Whether or not to add a batch axis when converting to tensors (see `tensor_type` above).
n_sequences (`Optional[int]`, *optional*):
            The number of sequences used to generate each sample (`1` for a single sequence, `2` for a pair).
"""
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
tensor_type: Union[None, str, TensorType] = None,
prepend_batch_axis: bool = False,
n_sequences: Optional[int] = None,
):
super().__init__(data)
if isinstance(encoding, EncodingFast):
encoding = [encoding]
self._encodings = encoding
if n_sequences is None and encoding is not None and len(encoding):
n_sequences = encoding[0].n_sequences
self._n_sequences = n_sequences
self.convert_to_tensors(tensor_type=tensor_type,
prepend_batch_axis=prepend_batch_axis)
@property
def n_sequences(self) -> Optional[int]:
"""
`Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
[`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
sentences)
"""
return self._n_sequences
@property
def is_fast(self) -> bool:
"""
`bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`]
or not.
"""
return self._encodings is not None
def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
"""
If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
etc.).
If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`.
"""
if isinstance(item, str):
return self.data[item]
elif self._encodings is not None:
return self._encodings[item]
else:
raise KeyError(
"Indexing with integers (to access backend Encoding for a given batch index) "
"is not available when using Python based tokenizers"
)
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def __getstate__(self):
return {"data": self.data, "encodings": self._encodings}
def __setstate__(self, state):
if "data" in state:
self.data = state["data"]
if "encodings" in state:
self._encodings = state["encodings"]
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
# After this point:
# Extended properties and methods only available for fast (Rust-based) tokenizers
# provided by HuggingFace tokenizers library.
@property
def encodings(self) -> Optional[List[EncodingFast]]:
"""
        `Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns
        `None` if the input was tokenized through a Python (i.e., not fast) tokenizer.
"""
return self._encodings
def tokens(self, batch_index: int = 0) -> List[str]:
"""
Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to
integer indices) at a given batch index (only works for the output of a fast tokenizer).
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[str]`: The list of tokens at that index.
"""
if not self._encodings:
raise ValueError(
"tokens() is not available when using Python-based tokenizers")
return self._encodings[batch_index].tokens
def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to the id of their original sentences:
- `None` for special tokens added around or between sequences,
- `0` for tokens corresponding to words in the first sequence,
- `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
encoded.
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
sequence.
"""
if not self._encodings:
raise ValueError(
"sequence_ids() is not available when using Python-based tokenizers")
return self._encodings[batch_index].sequence_ids
def words(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
(several tokens will be mapped to the same word index if they are parts of that word).
"""
if not self._encodings:
raise ValueError(
"words() is not available when using Python-based tokenizers")
warnings.warn(
"`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
"but more self-explanatory `BatchEncoding.word_ids()` property.",
FutureWarning,
)
return self.word_ids(batch_index)
def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
(several tokens will be mapped to the same word index if they are parts of that word).
"""
if not self._encodings:
raise ValueError(
"word_ids() is not available when using Python-based tokenizers")
return self._encodings[batch_index].word_ids
def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
"""
Get the index of the sequence represented by the given token. In the general use case, this method returns `0`
for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair
Can be called as:
- `self.token_to_sequence(token_index)` if batch size is 1
- `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
        words are defined by the user). In this case it makes it easy to associate encoded tokens with the
        provided tokenized words.
Args:
batch_or_token_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence.
token_index (`int`, *optional*):
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
sequence.
Returns:
            `int`: Index of the sequence containing the given token (`0` or `1`).
"""
if not self._encodings:
raise ValueError(
"token_to_sequence() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if token_index < 0:
token_index = self._seq_len + token_index
return self._encodings[batch_index].token_to_sequence(token_index)
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
"""
Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.
Can be called as:
- `self.token_to_word(token_index)` if batch size is 1
- `self.token_to_word(batch_index, token_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
        words are defined by the user). In this case it makes it easy to associate encoded tokens with the
        provided tokenized words.
Args:
batch_or_token_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence.
token_index (`int`, *optional*):
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
sequence.
Returns:
`int`: Index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError(
"token_to_word() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if token_index < 0:
token_index = self._seq_len + token_index
return self._encodings[batch_index].token_to_word(token_index)
def word_to_tokens(
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
) -> Optional[TokenSpan]:
"""
Get the encoded token span corresponding to a word in a sequence of the batch.
Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with:
- **start** -- Index of the first token.
- **end** -- Index of the token following the last token.
Can be called as:
- `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1
- `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to
1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
        are defined by the user). In this case it makes it easy to associate encoded tokens with the provided
        tokenized words.
Args:
batch_or_word_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the word in the sequence.
word_index (`int`, *optional*):
If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the
sequence.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided word index belongs to.
Returns:
Optional [`~tokenization_utils_base.TokenSpan`]: Span of tokens in the encoded sequence. Returns `None` if
no tokens correspond to the word.
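Example (an illustrative sketch, not part of the original docstring; assumes a fast
tokenizer such as `BertTokenizerFast`, with which "tokenization" splits into sub-tokens):
```python
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoding = tokenizer(["my", "tokenization"], is_split_into_words=True)
span = encoding.word_to_tokens(1)  # e.g. TokenSpan(start=2, end=4)
encoding.tokens()[span.start:span.end]  # the sub-tokens that make up the word
```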
"""
if not self._encodings:
raise ValueError(
"word_to_tokens() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if word_index < 0:
word_index = self._seq_len + word_index
span = self._encodings[batch_index].word_to_tokens(
word_index, sequence_index)
return TokenSpan(*span) if span is not None else None
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
"""
Get the character span corresponding to an encoded token in a sequence of the batch.
Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with:
- **start** -- Index of the first character in the original string associated to the token.
- **end** -- Index of the character following the last character in the original string associated to the
token.
Can be called as:
- `self.token_to_chars(token_index)` if batch size is 1
- `self.token_to_chars(batch_index, token_index)` if batch size is greater than 1
Args:
batch_or_token_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence.
token_index (`int`, *optional*):
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
sequence.
Returns:
[`~tokenization_utils_base.CharSpan`]: Span of characters in the original string.
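Example (an illustrative sketch, not part of the original docstring; assumes a fast
tokenizer such as `BertTokenizerFast`):
```python
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoding = tokenizer("hello world")
encoding.token_to_chars(1)  # CharSpan(start=0, end=5) -- token 1 covers "hello"
```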
"""
if not self._encodings:
raise ValueError(
"token_to_chars() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
def char_to_token(
self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
) -> int:
"""
Get the index of the token in the encoded output comprising a character in the original string for a sequence
of the batch.
Can be called as:
- `self.char_to_token(char_index)` if batch size is 1
- `self.char_to_token(batch_index, char_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
words.
Args:
batch_or_char_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the character in the original string.
char_index (`int`, *optional*):
If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
original string.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided character index belongs to.
Returns:
`int`: Index of the token.
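Example (an illustrative sketch, not part of the original docstring; assumes a fast
tokenizer such as `BertTokenizerFast`):
```python
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoding = tokenizer("hello world")
encoding.char_to_token(6)  # e.g. 2 -- character "w" falls inside the token for "world"
```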
"""
if not self._encodings:
raise ValueError(
"char_to_token() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_token(char_index, sequence_index)
def word_to_chars(
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
) -> CharSpan:
"""
Get the character span in the original string corresponding to a given word in a sequence of the batch.
Character spans are returned as a CharSpan NamedTuple with:
- start: index of the first character in the original string
- end: index of the character following the last character in the original string
Can be called as:
- `self.word_to_chars(word_index)` if batch size is 1
- `self.word_to_chars(batch_index, word_index)` if batch size is greater than 1
Args:
batch_or_word_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the word in the sequence.
word_index (`int`, *optional*):
If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
sequence.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided word index belongs to.
Returns:
`CharSpan`: Span of the corresponding characters in the original string. `CharSpan` is a NamedTuple with:
- start: index of the first character associated to the word in the original string
- end: index of the character following the last character associated to the word in the original
string
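Example (an illustrative sketch, not part of the original docstring; assumes a fast
tokenizer such as `BertTokenizerFast`):
```python
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoding = tokenizer("hello world")
encoding.word_to_chars(1)  # CharSpan(start=6, end=11) -- "world" in the original string
```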
"""
if not self._encodings:
raise ValueError(
"word_to_chars() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index)))
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int:
"""
Get the word in the original string corresponding to a character in the original string of a sequence of the
batch.
Can be called as:
- `self.char_to_word(char_index)` if batch size is 1
- `self.char_to_word(batch_index, char_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
words.
Args:
batch_or_char_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the character in the original string.
char_index (`int`, *optional*):
If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
original string.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided character index belongs to.
Returns:
`int`: Index of the word in the input sequence corresponding to the character.
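Example (an illustrative sketch, not part of the original docstring; assumes a fast
tokenizer such as `BertTokenizerFast`):
```python
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoding = tokenizer("hello world")
encoding.char_to_word(6)  # 1 -- character "w" belongs to the second word
```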
"""
if not self._encodings:
raise ValueError(
"char_to_word() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_word(char_index, sequence_index)
def convert_to_tensors(
self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
):
"""
Convert the inner content to tensors.
Args:
tensor_type (`str` or [`~file_utils.TensorType`], *optional*):
The type of tensors to use. If `str`, should be one of the values of the enum
[`~file_utils.TensorType`]. If `None`, no modification is done.
prepend_batch_axis (`bool`, *optional*, defaults to `False`):
Whether or not to add the batch dimension during the conversion.
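Example (an illustrative sketch, not part of the original docstring; `tokenizer` is
assumed to be any instantiated tokenizer, and padding makes the lengths match):
```python
encoding = tokenizer(["a short text", "a slightly longer text"], padding=True)
encoding.convert_to_tensors("np")  # in-place; use "pt" if PyTorch is installed
encoding["input_ids"].shape  # (2, max_sequence_length)
```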
"""
if tensor_type is None:
return self
# Convert to TensorType
if not isinstance(tensor_type, TensorType):
tensor_type = TensorType(tensor_type)
# Get a function reference for the correct framework
if tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError(
"Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
import torch
as_tensor = torch.tensor
is_tensor = torch.is_tensor
else:
as_tensor = np.asarray
is_tensor = _is_numpy
# (mfuntowicz: This code is unreachable)
# else:
# raise ImportError(
# f"Unable to convert output to tensors format {tensor_type}"
# )
# Do the tensor conversion in batch
for key, value in self.items():
try:
if prepend_batch_axis:
value = [value]
if not is_tensor(value):
tensor = as_tensor(value)
# Removing this for now in favor of controlling the shape with `prepend_batch_axis`
# # at-least2d
# if tensor.ndim > 2:
# tensor = tensor.squeeze(0)
# elif tensor.ndim < 2:
# tensor = tensor[None, :]
self[key] = tensor
except: # noqa E722
if key == "overflowing_tokens":
raise ValueError(
"Unable to create tensor returning overflowing tokens of different lengths. "
"Please see if a fast version of this tokenizer is available to have this feature available."
)
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length."
)
return self
@torch_required
def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding":
"""
Send all values to device by calling `v.to(device)` (PyTorch only).
Args:
device (`str` or `torch.device`): The device to put the tensors on.
Returns:
[`BatchEncoding`]: The same instance after modification.
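Example (an illustrative sketch, not part of the original docstring; assumes PyTorch
is installed):
```python
encoding = tokenizer("hello world", return_tensors="pt")
encoding = encoding.to("cpu")  # or e.g. "cuda:0" when a GPU is available
```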
"""
# This check catches things like APEX blindly calling "to" on all inputs to a module
# Otherwise it passes the casts down and casts the LongTensor containing the token idxs
# into a HalfTensor
if isinstance(device, str) or _is_torch_device(device) or isinstance(device, int):
self.data = {k: v.to(device=device) for k, v in self.data.items()}
else:
logger.warning(
f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.")
return self
class SpecialTokensMixin:
"""
A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to
special tokens. In particular, this class holds the attributes which can be used to directly access these special
tokens in a model-independent manner and allows setting and updating the special tokens.
Args:
bos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the beginning of a sentence.
eos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the end of a sentence.
unk_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing an out-of-vocabulary token.
sep_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token separating two different sentences in the same input (used by BERT for instance).
pad_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
attention mechanisms or loss computation.
cls_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the class of the input (used by BERT for instance).
mask_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT).
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens.
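Example (an illustrative sketch, not part of the original docstring; assumes a
concrete subclass such as `BertTokenizerFast`):
```python
tokenizer = BertTokenizerFast.from_pretrained(
"bert-base-uncased", additional_special_tokens=["<ent>"]
)
tokenizer.additional_special_tokens  # ['<ent>'] -- this token will never be split
```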
"""
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(self, verbose=True, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
self.verbose = verbose
# We directly set the hidden value to allow initialization with special tokens
# which are not yet in the vocabulary. Necessary for serialization/de-serialization
# TODO clean this up at some point (probably by switching to fast tokenizers)
for key, value in kwargs.items():
if value is None:
continue
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
assert all(
isinstance(t, (str, AddedToken)) for t in value
), "One of the tokens is not a string or an AddedToken"
setattr(self, key, value)
elif isinstance(value, (str, AddedToken)):
setattr(self, key, value)
else:
raise TypeError(
f"special token {key} has to be either str or AddedToken but got: {type(value)}")
def sanitize_special_tokens(self) -> int:
"""
Make sure that all the special tokens attributes of the tokenizer (`tokenizer.mask_token`,
`tokenizer.cls_token`, etc.) are in the vocabulary.
Add the missing ones to the vocabulary if needed.
Return:
`int`: The number of tokens added in the vocabulary during the operation.
"""
return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
def add_special_tokens(self, special_tokens_dict: Dict[str, Union[str, AddedToken]]) -> int:
"""
Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
current vocabulary).
Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding
matrix of the model so that its embedding matrix matches the tokenizer.
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- Special tokens are carefully handled by the tokenizer (they are never split).
- You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (for instance
[`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
`'</s>'`).
Args:
special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`):
Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
`sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
assigns the index of the `unk_token` to them).
Returns:
`int`: Number of tokens added to the vocabulary.
Examples:
```python
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2Model.from_pretrained("gpt2")
special_tokens_dict = {"cls_token": "<CLS>"}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print("We have added", num_added_toks, "tokens")
# Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
assert tokenizer.cls_token == "<CLS>"
```"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
if self.verbose:
logger.info(
f"Assigning {value} to the {key} key of the tokenizer")
setattr(self, key, value)
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(
isinstance(t, (str, AddedToken)) for t in value
), f"Tokens {value} for key {key} should all be str or AddedToken instances"
added_tokens += self.add_tokens(value, special_tokens=True)
else:
assert isinstance(
value, (str, AddedToken)
), f"Token {value} for key {key} should be a str or an AddedToken instance"
added_tokens += self.add_tokens([value], special_tokens=True)
return added_tokens
def add_tokens(
self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from length of the current vocabulary.
Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding
matrix of the model so that its embedding matrix matches the tokenizer.
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
Args:
new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`):
Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string
token to let you personalize its behavior: whether this token should only match against a single word,
whether this token should strip all potential whitespaces on the left side, whether this token should
strip all potential whitespaces on the right side, etc.
special_tokens (`bool`, *optional*, defaults to `False`):
Can be used to specify if the token is a special token. This mostly changes the normalization behavior
(special tokens like CLS or [MASK] are usually not lower-cased for instance).
See details for `tokenizers.AddedToken` in HuggingFace tokenizers library.
Returns:
`int`: Number of tokens added to the vocabulary.
Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
print("We have added", num_added_toks, "tokens")
# Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
if not new_tokens:
return 0
if not isinstance(new_tokens, (list, tuple)):
new_tokens = [new_tokens]
return self._add_tokens(new_tokens, special_tokens=special_tokens)
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
raise NotImplementedError
@property
def bos_token(self) -> str:
"""
`str`: Beginning of sentence token. Log an error if used while not having been set.
"""
if self._bos_token is None and self.verbose:
logger.error("Using bos_token, but it is not set yet.")
return None
return str(self._bos_token)
@property
def eos_token(self) -> str:
"""
`str`: End of sentence token. Log an error if used while not having been set.
"""
if self._eos_token is None and self.verbose:
logger.error("Using eos_token, but it is not set yet.")
return None
return str(self._eos_token)
@property
def unk_token(self) -> str:
"""
`str`: Unknown token. Log an error if used while not having been set.
"""
if self._unk_token is None and self.verbose:
logger.error("Using unk_token, but it is not set yet.")
return None
return str(self._unk_token)
@property
def sep_token(self) -> str:
"""
`str`: Separation token, to separate context and query in an input sequence. Log an error if used while not
having been set.
"""
if self._sep_token is None and self.verbose:
logger.error("Using sep_token, but it is not set yet.")
return None
return str(self._sep_token)
@property
def pad_token(self) -> str:
"""
`str`: Padding token. Log an error if used while not having been set.
"""
if self._pad_token is None and self.verbose:
logger.error("Using pad_token, but it is not set yet.")
return None
return str(self._pad_token)
@property
def cls_token(self) -> str:
"""
`str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full
depth of the model. Log an error if used while not having been set.
"""
if self._cls_token is None and self.verbose:
logger.error("Using cls_token, but it is not set yet.")
return None
return str(self._cls_token)
@property
def mask_token(self) -> str:
"""
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
"""
if self._mask_token is None and self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@property
def additional_special_tokens(self) -> List[str]:
"""
`List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been
set.
"""
if self._additional_special_tokens is None and self.verbose:
logger.error(
"Using additional_special_tokens, but it is not set yet.")
return None
return [str(tok) for tok in self._additional_special_tokens]
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
@property
def bos_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not
been set.
"""
if self._bos_token is None:
return None
return self.convert_tokens_to_ids(self.bos_token)
@property
def eos_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been
set.
"""
if self._eos_token is None:
return None
return self.convert_tokens_to_ids(self.eos_token)
@property
def unk_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set.
"""
if self._unk_token is None:
return None
return self.convert_tokens_to_ids(self.unk_token)
@property
def sep_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
sequence. Returns `None` if the token has not been set.
"""
if self._sep_token is None:
return None
return self.convert_tokens_to_ids(self.sep_token)
@property
def pad_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
"""
if self._pad_token is None:
return None
return self.convert_tokens_to_ids(self.pad_token)
@property
def pad_token_type_id(self) -> int:
"""
`int`: Id of the padding token type in the vocabulary.
"""
return self._pad_token_type_id
@property
def cls_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence
leveraging self-attention along the full depth of the model.
Returns `None` if the token has not been set.
"""
if self._cls_token is None:
return None
return self.convert_tokens_to_ids(self.cls_token)
@property
def mask_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
modeling. Returns `None` if the token has not been set.
"""
if self._mask_token is None:
return None
return self.convert_tokens_to_ids(self.mask_token)
@property
def additional_special_tokens_ids(self) -> List[int]:
"""
`List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having
been set.
"""
return self.convert_tokens_to_ids(self.additional_special_tokens)
@bos_token_id.setter
def bos_token_id(self, value):
self._bos_token = self.convert_tokens_to_ids(value)
@eos_token_id.setter
def eos_token_id(self, value):
self._eos_token = self.convert_tokens_to_ids(value)
@unk_token_id.setter
def unk_token_id(self, value):
self._unk_token = self.convert_tokens_to_ids(value)
@sep_token_id.setter
def sep_token_id(self, value):
self._sep_token = self.convert_tokens_to_ids(value)
@pad_token_id.setter
def pad_token_id(self, value):
self._pad_token = self.convert_tokens_to_ids(value)
@cls_token_id.setter
def cls_token_id(self, value):
self._cls_token = self.convert_tokens_to_ids(value)
@mask_token_id.setter
def mask_token_id(self, value):
self._mask_token = self.convert_tokens_to_ids(value)
@additional_special_tokens_ids.setter
def additional_special_tokens_ids(self, values):
self._additional_special_tokens = [
self.convert_tokens_to_ids(value) for value in values]
@property
def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
"""
`Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`,
`unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
Convert potential tokens of `tokenizers.AddedToken` type to string.
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = (
type(attr_value)(str(attr_value_sub) for attr_value_sub in attr_value)
if isinstance(attr_value, (list, tuple))
else str(attr_value)
)
return set_attr
@property
def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
"""
`Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping
special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
special tokens are tokenized.
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self) -> List[str]:
"""
`List[str]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
Convert tokens of `tokenizers.AddedToken` type to string.
"""
all_toks = [str(s) for s in self.all_special_tokens_extended]
return all_toks
@property
def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
"""
`List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class
attributes.
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
special tokens are tokenized.
"""
all_toks = []
set_attr = self.special_tokens_map_extended
for attr_value in set_attr.values():
all_toks = all_toks + (
list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]
)
all_toks = list(OrderedDict.fromkeys(all_toks))
return all_toks
@property
def all_special_ids(self) -> List[int]:
"""
`List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
ENCODE_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
stride (`int`, *optional*, defaults to 0):
If set to a number along with `max_length`, the overflowing tokens returned when
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
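A minimal sketch (not part of the original docstring) of how these arguments combine;
`tokenizer` is assumed to be any instantiated tokenizer:
```python
batch = tokenizer(
["a short sentence", "a noticeably longer sentence that will be truncated"],
padding="max_length",
truncation=True,
max_length=8,
return_tensors="np",
)
batch["input_ids"].shape  # (2, 8)
```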
"""
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
return_token_type_ids (`bool`, *optional*):
Whether to return token type IDs. If left to the default, will return the token type IDs according to
the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are token type IDs?](../glossary#token-type-ids)
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
of returning overflowing tokens.
return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
Whether or not to return special tokens mask information.
return_offsets_mapping (`bool`, *optional*, defaults to `False`):
Whether or not to return `(char_start, char_end)` for each token.
This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
a Python-based tokenizer, this method will raise `NotImplementedError`.
return_length (`bool`, *optional*, defaults to `False`):
Whether or not to return the lengths of the encoded inputs.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
**kwargs: passed to the `self.tokenize()` method
Return:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
[What are input IDs?](../glossary#input-ids)
- **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
if *"token_type_ids"* is in `self.model_input_names`).
[What are token type IDs?](../glossary#token-type-ids)
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
[What are attention masks?](../glossary#attention-mask)
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
- **length** -- The length of the inputs (when `return_length=True`)
"""
INIT_TOKENIZER_DOCSTRING = r"""
Class attributes (overridden by derived classes)
- **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
vocabulary file required by the model, and as associated values, the filename for saving the associated file
(string).
- **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
associated pretrained vocabulary file.
- **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
or `None` if the model has no maximum input size.
- **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
`short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
with the [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`] method.
- **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
- **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
Should be `'right'` or `'left'`.
- **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
applied. Should be `'right'` or `'left'`.
Args:
model_max_length (`int`, *optional*):
The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
default to VERY_LARGE_INTEGER (`int(1e30)`).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
truncation_side (`str`, *optional*):
The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
model_input_names (`List[string]`, *optional*):
The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
`"attention_mask"`). Default value is picked from the class attribute of the same name.
bos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and
`self.bos_token_id`.
eos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the end of a sentence. Will be associated to `self.eos_token` and
`self.eos_token_id`.
unk_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and
`self.unk_token_id`.
sep_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token separating two different sentences in the same input (used by BERT for instance). Will be
associated to `self.sep_token` and `self.sep_token_id`.
pad_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`.
cls_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the class of the input (used by BERT for instance). Will be associated to
`self.cls_token` and `self.cls_token_id`.
mask_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT). Will be associated to `self.mask_token` and `self.mask_token_id`.
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the
tokenization process. Will be associated to `self.additional_special_tokens` and
`self.additional_special_tokens_ids`.
"""
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
"""
Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].
Handles shared (mostly boiler plate) methods for those two classes.
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
max_model_input_sizes: Dict[str, Optional[int]] = {}
_auto_class: Optional[str] = None
# first name has to correspond to main model input name
# to make sure `tokenizer.pad(...)` works correctly
model_input_names: List[str] = [
"input_ids", "token_type_ids", "attention_mask"]
padding_side: str = "right"
truncation_side: str = "right"
slow_tokenizer_class = None
def __init__(self, **kwargs):
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = copy.deepcopy(kwargs)
self.name_or_path = kwargs.pop("name_or_path", "")
self._processor_class = kwargs.pop("processor_class", None)
# For backward compatibility we fallback to set model_max_length from max_len if provided
model_max_length = kwargs.pop(
"model_max_length", kwargs.pop("max_len", None))
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it
# is changed.
self.padding_side = kwargs.pop("padding_side", self.padding_side)
if self.padding_side not in ["right", "left"]:
raise ValueError(
f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
)
self.truncation_side = kwargs.pop(
"truncation_side", self.truncation_side)
if self.truncation_side not in ["right", "left"]:
raise ValueError(
f"Padding side should be selected between 'right' and 'left', current value: {self.truncation_side}"
)
self.model_input_names = kwargs.pop(
"model_input_names", self.model_input_names)
# Used to store whether we have already emitted a given deprecation warning (avoids over-logging).
self.deprecation_warnings = {}
super().__init__(**kwargs)
@property
def max_len_single_sentence(self) -> int:
"""
`int`: The maximum length of a sentence that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@property
def max_len_sentences_pair(self) -> int:
"""
`int`: The maximum combined length of a pair of sentences that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
# For backward compatibility, allow trying to set 'max_len_single_sentence'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
if not self.deprecation_warnings.get("max_len_single_sentence", False):
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
self.deprecation_warnings["max_len_single_sentence"] = True
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
# For backward compatibility, allow trying to set 'max_len_sentences_pair'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
if not self.deprecation_warnings.get("max_len_sentences_pair", False):
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
self.deprecation_warnings["max_len_sentences_pair"] = True
else:
raise ValueError(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
def _set_processor_class(self, processor_class: str):
"""Sets processor class as an attribute."""
self._processor_class = processor_class
def __repr__(self) -> str:
return (
f"{"PreTrainedTokenizerFast" if self.is_fast else "PreTrainedTokenizer"}(name_or_path='{self.name_or_path}', "
f"vocab_size={self.vocab_size}, model_max_len={self.model_max_length}, is_fast={self.is_fast}, "
f"padding_side='{self.padding_side}', truncation_side='{self.truncation_side}', special_tokens={self.special_tokens_map_extended})"
)
def get_vocab(self) -> Dict[str, int]:
"""
Returns the vocabulary as a dictionary of token to index.
`tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
vocab.
Returns:
`Dict[str, int]`: The vocabulary.
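Example (an illustrative sketch, not part of the original docstring; assumes a
concrete subclass such as `BertTokenizer`):
```python
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
vocab = tokenizer.get_vocab()
vocab["hello"] == tokenizer.convert_tokens_to_ids("hello")  # True
```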
"""
raise NotImplementedError()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
r"""
Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined
tokenizer.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
`./my_model_directory/vocab.txt`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download the vocabulary files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Attempt to resume the download if such a file
exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
local_files_only (`bool`, *optional*, defaults to `False`):
Whether or not to only rely on local files and not to attempt to download any files.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
inputs (additional positional arguments, *optional*):
Will be passed along to the Tokenizer `__init__` method.
kwargs (additional keyword arguments, *optional*):
Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`,
`eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
`additional_special_tokens`. See parameters in the `__init__` for more details.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
# We can't directly instantiate the base class *PreTrainedTokenizerBase*, so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from huggingface.co and cache.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Download vocabulary from huggingface.co (user-uploaded) and cache.
tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
# If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
tokenizer = BertTokenizer.from_pretrained("./test/saved_model/")
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", unk_token="<unk>")
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead
assert tokenizer.unk_token == "<unk>"
```"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
subfolder = kwargs.pop("subfolder", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "tokenizer",
"from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
vocab_files = {}
init_configuration = {}
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
if len(cls.vocab_files_names) > 1:
raise ValueError(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
"supported for this tokenizer. Use a model identifier or the path to a directory instead."
)
warnings.warn(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and "
"won't be possible anymore in v5. Use a model identifier or the path to a directory instead.",
FutureWarning,
)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
else:
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
additional_files_names = {
"added_tokens_file": ADDED_TOKENS_FILE,
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
}
vocab_files_target = {
**cls.vocab_files_names, **additional_files_names}
if "tokenizer_file" in vocab_files_target:
# Try to get the tokenizer config to see if there are versioned tokenizer files.
fast_tokenizer_file = FULL_TOKENIZER_FILE
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path,
TOKENIZER_CONFIG_FILE,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
use_auth_token=use_auth_token,
revision=revision,
local_files_only=local_files_only,
)
if resolved_config_file is not None:
with open(resolved_config_file, encoding="utf-8") as reader:
tokenizer_config = json.load(reader)
if "fast_tokenizer_files" in tokenizer_config:
fast_tokenizer_file = get_fast_tokenizer_file(
tokenizer_config["fast_tokenizer_files"])
vocab_files_target["tokenizer_file"] = fast_tokenizer_file
# Look for the tokenizer files
for file_id, file_name in vocab_files_target.items():
if os.path.isdir(pretrained_model_name_or_path):
if subfolder is not None:
full_file_name = os.path.join(
pretrained_model_name_or_path, subfolder, file_name)
else:
full_file_name = os.path.join(
pretrained_model_name_or_path, file_name)
if not os.path.exists(full_file_name):
logger.info(
f"Didn't find file {full_file_name}. We won't load it.")
full_file_name = None
else:
full_file_name = hf_bucket_url(
pretrained_model_name_or_path,
filename=file_name,
subfolder=subfolder,
revision=revision,
mirror=None,
)
vocab_files[file_id] = full_file_name
# Get files from url, cache, or disk depending on the case
resolved_vocab_files = {}
unresolved_files = []
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
try:
resolved_vocab_files[file_id] = cached_path(
file_path,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except FileNotFoundError as error:
if local_files_only:
unresolved_files.append(file_id)
else:
raise error
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to "
"pass a token having permission to this repo with `use_auth_token` or log in with "
"`huggingface-cli login` and pass `use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists "
"for this model name. Check the model page at "
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
)
except EntryNotFoundError:
logger.debug(
f"{pretrained_model_name_or_path} does not contain a file named {file_path}.")
resolved_vocab_files[file_id] = None
except HTTPError as err:
if "404 Client Error" in str(err):
logger.debug(
f"Connection problem to access {file_path}.")
resolved_vocab_files[file_id] = None
else:
raise err
if len(unresolved_files) > 0:
logger.info(
f"Can't load following files from cache: {unresolved_files} and cannot check if these "
"files are necessary for the tokenizer to operate."
)
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
raise EnvironmentError(
f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing all relevant files for a {cls.__name__} tokenizer."
)
for file_id, file_path in vocab_files.items():
if file_id not in resolved_vocab_files:
continue
if file_path == resolved_vocab_files[file_id]:
logger.info(f"loading file {file_path}")
else:
logger.info(
f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
return cls._from_pretrained(
resolved_vocab_files,
pretrained_model_name_or_path,
init_configuration,
*init_inputs,
use_auth_token=use_auth_token,
cache_dir=cache_dir,
**kwargs,
)
@classmethod
def _from_pretrained(
cls,
resolved_vocab_files,
pretrained_model_name_or_path,
init_configuration,
*init_inputs,
use_auth_token=None,
cache_dir=None,
**kwargs
):
# We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
# file or if `from_slow` is set to True.
from_slow = kwargs.get("from_slow", False)
has_tokenizer_file = resolved_vocab_files.get(
"tokenizer_file", None) is not None
if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None:
slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
copy.deepcopy(resolved_vocab_files),
pretrained_model_name_or_path,
copy.deepcopy(init_configuration),
*init_inputs,
**(copy.deepcopy(kwargs)),
)
else:
slow_tokenizer = None
# Prepare tokenizer initialization kwargs
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop(
"tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
# First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers.
config_tokenizer_class = init_kwargs.get("tokenizer_class")
init_kwargs.pop("tokenizer_class", None)
init_kwargs.pop("auto_map", None)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
config_tokenizer_class = None
init_kwargs = init_configuration
if config_tokenizer_class is None:
from .models.auto.configuration_auto import AutoConfig # tests_ignore
# Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
try:
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path,
use_auth_token=use_auth_token,
cache_dir=cache_dir,
)
config_tokenizer_class = config.tokenizer_class
except (OSError, ValueError, KeyError):
# skip if an error occurred.
config = None
if config_tokenizer_class is None:
# Third attempt. If we have not yet found the original type of the tokenizer we are loading,
# see if we can infer it from the type of the configuration file.
from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore
if hasattr(config, "model_type"):
model_type = config.model_type
else:
# Fallback: use pattern matching on the string.
model_type = None
for pattern in TOKENIZER_MAPPING_NAMES.keys():
if pattern in str(pretrained_model_name_or_path):
model_type = pattern
break
if model_type is not None:
config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get(
model_type, (None, None)
)
if config_tokenizer_class is None:
config_tokenizer_class = config_tokenizer_class_fast
if config_tokenizer_class is not None:
if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
logger.warning(
"The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. "
"It may result in unexpected tokenization. \n"
f"The tokenizer class you load from this checkpoint is '{config_tokenizer_class}'. \n"
f"The class this function is called from is '{cls.__name__}'."
)
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Convert AddedTokens serialized as dict to class instances
def convert_added_tokens(obj: Union[AddedToken, Any]):
if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
obj.pop("__type")
return AddedToken(**obj)
elif isinstance(obj, (list, tuple)):
return list(convert_added_tokens(o) for o in obj)
elif isinstance(obj, dict):
return {k: convert_added_tokens(v) for k, v in obj.items()}
return obj
init_kwargs = convert_added_tokens(init_kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
# won't index sequences longer than the number of positional embeddings
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get(
"model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
if slow_tokenizer is not None:
init_kwargs["__slow_tokenizer"] = slow_tokenizer
init_kwargs["name_or_path"] = pretrained_model_name_or_path
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
# Removed: Now done at the base class level
# tokenizer.init_inputs = init_inputs
# tokenizer.init_kwargs = init_kwargs
# If there is a complementary special token map, load it
special_tokens_map_file = resolved_vocab_files.pop(
"special_tokens_map_file", None)
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if key in kwargs and kwargs[key]:
# This value has already been redefined by the kwargs
# We keep this new value and ignore the one stored in the special_tokens_map_file
continue
if isinstance(value, dict):
value = AddedToken(**value)
elif isinstance(value, list):
value = [AddedToken(
**token) if isinstance(token, dict) else token for token in value]
setattr(tokenizer, key, value)
# Add supplementary tokens.
special_tokens = tokenizer.all_special_tokens
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
# Sort added tokens by index
added_tok_encoder_sorted = list(
sorted(added_tok_encoder.items(), key=lambda x: x[1]))
for token, index in added_tok_encoder_sorted:
if has_tokenizer_file and index != len(tokenizer) and tokenizer.convert_tokens_to_ids(token) != index:
# Tokenizer fast: added token needs to either be in the vocabulary with the proper index or the
# index is the current length of the tokenizer (not in vocabulary)
raise ValueError(
f"Wrong index found for {token}: should be {tokenizer.convert_tokens_to_ids(token)} but found "
f"{index}."
)
elif not has_tokenizer_file and index != len(tokenizer):
# Tokenizer slow: added token cannot already be in the vocabulary so its index needs to be the
# current length of the tokenizer.
raise ValueError(
f"Non-consecutive added token '{token}' found. "
f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
)
# Safe to call on a tokenizer fast even if token already there.
tokenizer.add_tokens(
token, special_tokens=bool(token in special_tokens))
# Check all our special tokens are registered as "no split" token (we don't cut them) and are in the vocab
added_tokens = tokenizer.sanitize_special_tokens()
if added_tokens:
logger.warning_advice(
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained."
)
return tokenizer
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
legacy_format: Optional[bool] = None,
filename_prefix: Optional[str] = None,
push_to_hub: bool = False,
**kwargs,
) -> Tuple[str]:
"""
Save the full tokenizer state.
This method makes sure the full tokenizer can then be re-loaded using the
[`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.
Warning: this won't save modifications you may have applied to the tokenizer after instantiation (for
instance, modifying `tokenizer.do_lower_case` after creation).
Args:
save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
legacy_format (`bool`, *optional*):
Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
format as well as in the legacy format if it exists, i.e. with a tokenizer-specific vocabulary and a separate
added_tokens file.
If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
"slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
loaded in the corresponding "slow" tokenizer.
If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a
`ValueError` is raised.
filename_prefix (`str`, *optional*):
A prefix to add to the names of the files saved by the tokenizer.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
<Tip warning={true}>
Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
folder. Pass along `temp_dir=True` to use a temporary directory instead.
</Tip>
Returns:
A tuple of `str`: The files saved.
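Example:
A minimal save/reload round trip (a sketch; the checkpoint and directory names are illustrative):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
saved_files = tokenizer.save_pretrained("./my-tokenizer")  # paths of the files written
reloaded = AutoTokenizer.from_pretrained("./my-tokenizer")  # reload from the saved state
```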
"""
if os.path.isfile(save_directory):
logger.error(
f"Provided path ({save_directory}) should be a directory, not a file")
return
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
special_tokens_map_file = os.path.join(
save_directory, (filename_prefix +
"-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
)
tokenizer_config_file = os.path.join(
save_directory, (filename_prefix +
"-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
# Sanitize AddedTokens
def convert_added_tokens(obj: Union[AddedToken, Any], add_type_field=True):
if isinstance(obj, AddedToken):
out = obj.__getstate__()
if add_type_field:
out["__type"] = "AddedToken"
return out
elif isinstance(obj, (list, tuple)):
return list(convert_added_tokens(o, add_type_field=add_type_field) for o in obj)
elif isinstance(obj, dict):
return {k: convert_added_tokens(v, add_type_field=add_type_field) for k, v in obj.items()}
return obj
# add_type_field=True to allow dicts in the kwargs / differentiate from AddedToken serialization
tokenizer_config = convert_added_tokens(
tokenizer_config, add_type_field=True)
# Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
tokenizer_class = self.__class__.__name__
# Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast`
if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast":
tokenizer_class = tokenizer_class[:-4]
tokenizer_config["tokenizer_class"] = tokenizer_class
if getattr(self, "_auto_map", None) is not None:
tokenizer_config["auto_map"] = self._auto_map
if getattr(self, "_processor_class", None) is not None:
tokenizer_config["processor_class"] = self._processor_class
# If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=tokenizer_config)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
logger.info(f"tokenizer config file saved in {tokenizer_config_file}")
# Sanitize AddedTokens in special_tokens_map
write_dict = convert_added_tokens(
self.special_tokens_map_extended, add_type_field=False)
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
f.write(json.dumps(write_dict, ensure_ascii=False))
logger.info(f"Special tokens file saved in {special_tokens_map_file}")
file_names = (tokenizer_config_file, special_tokens_map_file)
save_files = self._save_pretrained(
save_directory=save_directory,
file_names=file_names,
legacy_format=legacy_format,
filename_prefix=filename_prefix,
)
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Tokenizer pushed to the hub in this commit: {url}")
return save_files
def _save_pretrained(
self,
save_directory: Union[str, os.PathLike],
file_names: Tuple[str],
legacy_format: Optional[bool] = None,
filename_prefix: Optional[str] = None,
) -> Tuple[str]:
"""
Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the
specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`]
"""
if legacy_format is False:
raise ValueError(
"Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format."
)
save_directory = str(save_directory)
added_tokens_file = os.path.join(
save_directory, (filename_prefix +
"-" if filename_prefix else "") + ADDED_TOKENS_FILE
)
added_vocab = self.get_added_vocab()
if added_vocab:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(added_vocab, ensure_ascii=False)
f.write(out_str)
logger.info(f"added tokens file saved in {added_tokens_file}")
vocab_files = self.save_vocabulary(
save_directory, filename_prefix=filename_prefix)
return file_names + vocab_files + (added_tokens_file,)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
"""
Save only the vocabulary of the tokenizer (vocabulary + added tokens).
This method won't save the configuration and special token mappings of the tokenizer. Use
[`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the names of the saved files.
Returns:
`Tuple(str)`: Paths to the files saved.
"""
raise NotImplementedError
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
"""
Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.
Args:
text (`str`):
The sequence to be encoded.
pair (`str`, *optional*):
A second sequence to be encoded with the first.
add_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to add the special tokens associated with the corresponding model.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific encode method. See details in
[`~PreTrainedTokenizerBase.__call__`]
Returns:
`List[str]`: The list of tokens.
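Example:
A sketch: the base class raises `NotImplementedError`, so this assumes a concrete subclass, and the
exact tokens depend on its vocabulary:
```python
tokens = tokenizer.tokenize("Hello, world!")
# e.g. ['hello', ',', 'world', '!'] for an uncased WordPiece vocabulary
```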
"""
raise NotImplementedError
@add_end_docstrings(
ENCODE_KWARGS_DOCSTRING,
"""
**kwargs: Passed along to the `.tokenize()` method.
""",
"""
Returns:
`List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.
""",
)
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput,
PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> List[int]:
"""
Converts a string to a sequence of ids (integers), using the tokenizer and vocabulary.
Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.
Args:
text (`str`, `List[str]` or `List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
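Example:
A sketch assuming a concrete tokenizer instance; the ids vary by vocabulary:
```python
ids = tokenizer.encode("Hello, world!", add_special_tokens=True)
text = tokenizer.decode(ids, skip_special_tokens=True)  # approximate round trip, modulo normalization
```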
"""
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def num_special_tokens_to_add(self, pair: bool = False) -> int:
raise NotImplementedError
def _get_padding_truncation_strategies(
self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
):
"""
Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
and pad_to_max_length) and behaviors.
"""
old_truncation_strategy = kwargs.pop(
"truncation_strategy", "do_not_truncate")
old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
# Backward compatibility for previous behavior, maybe we should deprecate it:
# If you only set max_length, it activates truncation for max_length
if max_length is not None and padding is False and truncation is False:
if verbose:
if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False):
logger.warning(
"Truncation was not explicitly activated but `max_length` is provided a specific value, "
"please use `truncation=True` to explicitly truncate examples to max length. "
"Defaulting to 'longest_first' truncation strategy. "
"If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy "
"more precisely by providing a specific strategy to `truncation`."
)
self.deprecation_warnings["Truncation-not-explicitly-activated"] = True
truncation = "longest_first"
# Get padding strategy
if padding is False and old_pad_to_max_length:
if verbose:
warnings.warn(
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
"maximal input size of the model (e.g. 512 for Bert).",
FutureWarning,
)
if max_length is None:
padding_strategy = PaddingStrategy.LONGEST
else:
padding_strategy = PaddingStrategy.MAX_LENGTH
elif padding is not False:
if padding is True:
if verbose:
if max_length is not None and (truncation is False or truncation == "do_not_truncate"):
warnings.warn(
"`max_length` is ignored when `padding`=`True` and there is no truncation strategy. "
"To pad to max length, use `padding='max_length'`."
)
if old_pad_to_max_length is not False:
warnings.warn(
"Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.")
# Default to pad to the longest sequence in the batch
padding_strategy = PaddingStrategy.LONGEST
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
elif isinstance(padding, PaddingStrategy):
padding_strategy = padding
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
# Get truncation strategy
if truncation is False and old_truncation_strategy != "do_not_truncate":
if verbose:
warnings.warn(
"The `truncation_strategy` argument is deprecated and will be removed in a future version, "
"use `truncation=True` to truncate examples to a max length. You can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
"maximal input size of the model (e.g. 512 for Bert). "
" If you have pairs of inputs, you can give a specific truncation strategy selected among "
"`truncation='only_first'` (will only truncate the first sentence in the pairs) "
"`truncation='only_second'` (will only truncate the second sentence in the pairs) "
"or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).",
FutureWarning,
)
truncation_strategy = TruncationStrategy(old_truncation_strategy)
elif truncation is not False:
if truncation is True:
truncation_strategy = (
TruncationStrategy.LONGEST_FIRST
) # Default to truncate the longest sequences in pairs of inputs
elif not isinstance(truncation, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation)
elif isinstance(truncation, TruncationStrategy):
truncation_strategy = truncation
else:
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
if self.model_max_length > LARGE_INTEGER:
if verbose:
if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False):
logger.warning(
"Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no padding."
)
self.deprecation_warnings["Asking-to-pad-to-max_length"] = True
padding_strategy = PaddingStrategy.DO_NOT_PAD
else:
max_length = self.model_max_length
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
if self.model_max_length > LARGE_INTEGER:
if verbose:
if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False):
logger.warning(
"Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no truncation."
)
self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
else:
max_length = self.model_max_length
# Test if we have a padding token
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
raise ValueError(
"Asking to pad but the tokenizer does not have a padding token. "
"Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
)
# Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
if (
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
and padding_strategy != PaddingStrategy.DO_NOT_PAD
and pad_to_multiple_of is not None
and max_length is not None
and (max_length % pad_to_multiple_of != 0)
):
raise ValueError(
f"Truncation and padding are both activated but "
f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
)
return padding_strategy, truncation_strategy, max_length, kwargs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
text_pair: Optional[Union[TextInput, PreTokenizedInput,
List[TextInput], List[PreTokenizedInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
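Example:
A sketch of the three input shapes; `return_tensors="pt"` assumes PyTorch is installed:
```python
enc = tokenizer("Hello, world!")  # single sequence
pair = tokenizer("A premise.", "A hypothesis.")  # sequence pair
batch = tokenizer(
    [["Hello", "world"], ["Good", "morning"]],  # pretokenized batch
    is_split_into_words=True, padding=True, return_tensors="pt",
)
```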
"""
# Input type checking for clearer error
def _is_valid_text_input(t):
if isinstance(t, str):
# Strings are fine
return True
elif isinstance(t, (list, tuple)):
# Lists are fine as long as they are...
if len(t) == 0:
# ... empty
return True
elif isinstance(t[0], str):
# ... list of strings
return True
elif isinstance(t[0], (list, tuple)):
# ... list with an empty list or with a list of strings
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if not _is_valid_text_input(text):
raise ValueError(
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
if text_pair is not None and not _is_valid_text_input(text_pair):
raise ValueError(
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
if is_split_into_words:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(
text[0], (list, tuple))
else:
is_batched = isinstance(text, (list, tuple))
if is_batched:
if isinstance(text_pair, str):
raise TypeError(
"when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as `text`."
)
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}."
)
batch_text_or_text_pairs = list(
zip(text, text_pair)) if text_pair is not None else text
return self.batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput,
PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
text (`str`, `List[str]` or `List[int]` (the latter only for not-fast tokenizers)):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput,
PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded. This can be a list of
string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
details in `encode_plus`).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
List[BatchEncoding],
Dict[str, EncodedInput],
Dict[str, List[EncodedInput]],
List[Dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
in the batch.
The padding side (left/right) and the padding token ids are defined at the tokenizer level (with `self.padding_side`,
`self.pad_token_id` and `self.pad_token_type_id`).
<Tip>
If the `encoded_inputs` passed are a dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
PyTorch tensors, however, you will lose the specific device of your tensors.
</Tip>
Args:
encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):
Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function.
Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see
the note above for the return type.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
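Example:
A sketch of using `pad` as a collate function; the toy ids are illustrative:
```python
features = [{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5]}]
batch = tokenizer.pad(features, padding=True, return_tensors="np")
# batch["input_ids"] has shape (2, 3); for most tokenizers an attention mask marks the padding.
```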
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)):
encoded_inputs = {key: [
example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
# The model's main input name, usually `input_ids`, has to be passed for padding
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(
"You should supply an encoding or a list of encodings to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
)
required_input = encoded_inputs[self.model_input_names[0]]
if not required_input:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
for item in required_input:
if len(item) != 0:
first_element = item[0]
break
# At this point, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
if not isinstance(first_element, (int, list, tuple)):
if is_torch_available() and _is_torch(first_element):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
# Convert padding_strategy in PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and not isinstance(required_input[0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
assert all(
len(v) == batch_size for v in encoded_inputs.values()
), "Some items in the output dictionary have a different batch size than others."
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in required_input)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = {k: v[i] for k, v in encoded_inputs.items()}
outputs = self._pad(
inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create the token type IDs corresponding to the sequences passed. [What are token type
IDs?](../glossary#token-type-ids)
Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (`List[int]`): The first tokenized sequence.
token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
Returns:
`List[int]`: The token type ids.
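Example:
The default base-class behavior shown below; subclasses that add special tokens return longer masks:
```python
tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8, 9])
# -> [0, 0, 1, 1, 1] with this default implementation
```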
"""
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens.
This implementation does not add special tokens and this method should be overridden in a subclass.
Args:
token_ids_0 (`List[int]`): The first tokenized sequence.
token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
Returns:
`List[int]`: The model input with special tokens.
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs
) -> BatchEncoding:
"""
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if they overflow while taking the special tokens into account, and
manages a moving window (with a user-defined stride) for overflowing tokens. Note that for *pair_ids*
different from `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
overflowing tokens; such a combination of arguments will raise an error.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
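Example:
A low-level sketch; calling the tokenizer directly via `__call__` is usually preferable:
```python
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello, world!"))
encoded = tokenizer.prepare_for_model(ids, add_special_tokens=True)
# encoded["input_ids"] now includes the model's special tokens.
```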
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
if return_token_type_ids and not add_special_tokens:
raise ValueError(
"Asking to return token_type_ids while setting add_special_tokens to False "
"results in an undefined behavior. Please set add_special_tokens to True or "
"set return_token_type_ids to None."
)
if (
return_overflowing_tokens
and truncation_strategy == TruncationStrategy.LONGEST_FIRST
and pair_ids is not None
):
raise ValueError(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Compute the total size of the returned encodings
total_len = len_ids + len_pair_ids + \
(self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
# Truncation: Handle max sequence length
overflowing_tokens = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(
ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * \
len(ids) + ([0] * len(pair_ids) if pair else [])
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(
ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
self._eventual_warn_about_too_long_sequence(
encoded_inputs["input_ids"], max_length, verbose)
# Padding
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
stride: int = 0,
) -> Tuple[List[int], List[int], List[int]]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output a batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
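Example:
A sketch with toy ids, assuming `truncation_side == "right"`; `only_first` removes tokens from `ids` only:
```python
ids, pair_ids, overflow = tokenizer.truncate_sequences(
    [1, 2, 3, 4, 5], pair_ids=[6, 7], num_tokens_to_remove=2,
    truncation_strategy="only_first",
)
# ids == [1, 2, 3], pair_ids == [6, 7], overflow == [4, 5]
```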
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
):
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
if self.truncation_side == "left":
overflowing_tokens = ids[:window_len]
ids = ids[num_tokens_to_remove:]
elif self.truncation_side == "right":
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
else:
raise ValueError(
f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.")
else:
error_msg = (
f"We need to remove {num_tokens_to_remove} to truncate the input "
f"but the first sequence has a length {len(ids)}. "
)
if truncation_strategy == TruncationStrategy.ONLY_FIRST:
error_msg = (
error_msg + "Please select another truncation strategy than "
f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
)
logger.error(error_msg)
elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
logger.warning(
f"Be aware, overflowing tokens are not returned for the setting you have chosen,"
f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
f"truncation strategy. So the returned list will always be empty even if some "
f"tokens have been removed."
)
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
if self.truncation_side == "right":
ids = ids[:-1]
elif self.truncation_side == "left":
ids = ids[1:]
else:
raise ValueError(
"invalid truncation strategy:" + str(self.truncation_side))
else:
if self.truncation_side == "right":
pair_ids = pair_ids[:-1]
elif self.truncation_side == "left":
pair_ids = pair_ids[1:]
else:
raise ValueError(
"invalid truncation strategy:" + str(self.truncation_side))
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
if self.truncation_side == "right":
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif self.truncation_side == "left":
overflowing_tokens = pair_ids[:window_len]
pair_ids = pair_ids[num_tokens_to_remove:]
else:
raise ValueError(
"invalid truncation strategy:" + str(self.truncation_side))
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input "
f"but the second sequence has a length {len(pair_ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
f"for instance 'longest_first' or 'only_first'."
)
return (ids, pair_ids, overflowing_tokens)
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) +
1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(
required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [
0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] +
[self.pad_token_type_id] * difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [
1] * difference
encoded_inputs[self.model_input_names[0]
] = required_input + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * \
difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [
1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [
self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" +
str(self.padding_side))
return encoded_inputs
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)`, but we
often want to remove sub-word tokenization artifacts at the same time.
Args:
tokens (`List[str]`): The tokens to join into a string.
Returns:
`str`: The joined tokens.
"""
raise NotImplementedError
def batch_decode(
self,
sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> List[str]:
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up the tokenization spaces.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`List[str]`: The list of decoded sentences.
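Example:
A sketch; the exact strings depend on the tokenizer's casing and normalization:
```python
batch = tokenizer(["Hello world", "Goodbye"], padding=True)
texts = tokenizer.batch_decode(batch["input_ids"], skip_special_tokens=True)
```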
"""
return [
self.decode(
seq,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
for seq in sequences
]
def decode(
self,
token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> str:
"""
Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up the tokenization spaces.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
# Convert inputs to python lists
token_ids = to_py_obj(token_ids)
return self._decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def _decode(
self,
token_ids: Union[int, List[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> str:
raise NotImplementedError
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of ids of the first sequence.
token_ids_1 (`List[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
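Example:
A sketch; this base implementation only supports `already_has_special_tokens=True`:
```python
ids = tokenizer.encode("Hello, world!", add_special_tokens=True)
mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
# e.g. [1, 0, 0, 0, 0, 1] for a model that wraps inputs in [CLS] ... [SEP]
```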
"""
assert already_has_special_tokens and token_ids_1 is None, (
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
"Please use a slow (full python) tokenizer to activate this argument. "
"Or set `return_special_tokens_mask=True` when calling the encoding method "
"to get the special tokens mask in any tokenizer. "
)
all_special_ids = self.all_special_ids # cache the property
special_tokens_mask = [
1 if token in all_special_ids else 0 for token in token_ids_0]
return special_tokens_mask
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
"""
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
Args:
out_string (`str`): The text to clean up.
Returns:
`str`: The cleaned-up string.
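Example:
```python
tokenizer.clean_up_tokenization("do n't stop , please !")
# -> "don't stop, please!"
```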
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
"""
Depending on the input and internal state we might trigger a warning about a sequence that is too long for its
corresponding model
Args:
ids (`List[int]`): The ids produced by the tokenization
max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
verbose (`bool`): Whether or not to print more information and warnings.
"""
if max_length is None and len(ids) > self.model_max_length and verbose:
if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
"will result in indexing errors"
)
self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
@contextmanager
def as_target_tokenizer(self):
"""
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
sequence-to-sequence models that need slightly different processing for the labels.
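Example:
A sketch of preparing labels for a seq2seq model; `src_texts` and `tgt_texts` are placeholder lists of strings:
```python
inputs = tokenizer(src_texts, padding=True, return_tensors="pt")
with tokenizer.as_target_tokenizer():
    labels = tokenizer(tgt_texts, padding=True, return_tensors="pt")
inputs["labels"] = labels["input_ids"]
```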
"""
yield
@classmethod
def register_for_auto_class(cls, auto_class="AutoTokenizer"):
"""
Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the
library are already mapped with `AutoTokenizer`.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
The auto class to register this new tokenizer with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def prepare_seq2seq_batch(
self,
src_texts: List[str],
tgt_texts: Optional[List[str]] = None,
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: Optional[str] = None,
truncation: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Prepare model inputs for translation. For best performance, translate one sentence at a time.
Arguments:
src_texts (`List[str]`):
List of documents to summarize or source language texts.
tgt_texts (`List[str]`, *optional*):
List of summaries or target language texts.
max_length (`int`, *optional*):
Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
required by one of the truncation/padding parameters. If the model has no specific maximum input length
(like XLNet) truncation/padding to a maximum length will be deactivated.
max_target_length (`int`, *optional*):
Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set
to `None`, this will use the `max_length` value.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
**kwargs:
Additional keyword arguments passed along to `self.__call__`.
Return:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to the encoder.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- **labels** -- List of token ids for tgt_texts.
The full set of keys `[input_ids, attention_mask, labels]` will only be returned if `tgt_texts` is passed.
Otherwise, `input_ids` and `attention_mask` will be the only keys.
"""
# docstyle-ignore
formatted_warning = """
`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
`__call__` method to prepare your inputs and the tokenizer under the `as_target_tokenizer` context manager to prepare
your targets.
Here is a short example:
model_inputs = tokenizer(src_texts, ...)
with tokenizer.as_target_tokenizer():
labels = tokenizer(tgt_texts, ...)
model_inputs["labels"] = labels["input_ids"]
See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
For a more complete example, see the implementation of `prepare_seq2seq_batch`.
"""
warnings.warn(formatted_warning, FutureWarning)
# mBART-specific kwargs that should be ignored by other models.
kwargs.pop("src_lang", None)
kwargs.pop("tgt_lang", None)
if max_length is None:
max_length = self.model_max_length
model_inputs = self(
src_texts,
add_special_tokens=True,
return_tensors=return_tensors,
max_length=max_length,
padding=padding,
truncation=truncation,
**kwargs,
)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = max_length
with self.as_target_tokenizer():
labels = self(
tgt_texts,
add_special_tokens=True,
return_tensors=return_tensors,
padding=padding,
max_length=max_target_length,
truncation=truncation,
**kwargs,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
"""
Get the tokenization file to use for this version of transformers.
Args:
tokenization_files (`List[str]`): The list of available tokenization files.
Returns:
`str`: The tokenization file to use.
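Example (hypothetical file names; the result depends on the installed `transformers` version):
```python
get_fast_tokenizer_file(["tokenizer.json", "tokenizer.3.0.0.json"])
# -> "tokenizer.3.0.0.json" when the installed version is >= 3.0.0,
# otherwise the default "tokenizer.json"
```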
"""
tokenizer_files_map = {}
for file_name in tokenization_files:
search = _re_tokenizer_file.search(file_name)
if search is not None:
v = search.groups()[0]
tokenizer_files_map[v] = file_name
available_versions = sorted(tokenizer_files_map.keys())
# Default to FULL_TOKENIZER_FILE, then try to pick the newest compatible versioned file.
tokenizer_file = FULL_TOKENIZER_FILE
transformers_version = version.parse(__version__)
for v in available_versions:
if version.parse(v) <= transformers_version:
tokenizer_file = tokenizer_files_map[v]
else:
# No point going further since the versions are sorted.
break
return tokenizer_file
# To update the docstring, we need to copy the method, otherwise we change the original docstring.
PreTrainedTokenizerBase.push_to_hub = copy_func(
PreTrainedTokenizerBase.push_to_hub)
PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
)
| import copy
import json
import os
import re
import warnings
from collections import OrderedDict, UserDict
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from packaging import version
from requests import HTTPError
from . import __version__
from .dynamic_module_utils import custom_object_save
from .utils import (
EntryNotFoundError,
ExplicitEnum,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
add_end_docstrings,
cached_path,
copy_func,
get_file_from_repo,
hf_bucket_url,
is_flax_available,
is_offline_mode,
is_remote_url,
is_tokenizers_available,
is_torch_available,
logging,
to_py_obj,
torch_required,
)
from .utils.generic import _is_jax, _is_numpy, _is_tensorflow, _is_torch, _is_torch_device
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tokenizers_available():
from tokenizers import AddedToken
from tokenizers import Encoding as EncodingFast
else:
@dataclass(frozen=True, eq=True)
class AddedToken:
"""
AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
way it should behave.
"""
content: str = field(default_factory=str)
single_word: bool = False
lstrip: bool = False
rstrip: bool = False
normalized: bool = True
def __getstate__(self):
return self.__dict__
@dataclass
class EncodingFast:
"""This is dummy class because without the `tokenizers` library we don't have these objects anyway"""
pass
logger = logging.get_logger(__name__)
# This is used to set the max input length for a model with infinite size input
VERY_LARGE_INTEGER = int(1e30)
# This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
LARGE_INTEGER = int(1e20)
# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]
# Slow tokenizers used to be saved in three separate files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
# Fast tokenizers (provided by HuggingFace's tokenizers library) can be saved in a single file
FULL_TOKENIZER_FILE = "tokenizer.json"
_re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json")
class TruncationStrategy(ExplicitEnum):
"""
Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
an IDE.
"""
ONLY_FIRST = "only_first"
ONLY_SECOND = "only_second"
LONGEST_FIRST = "longest_first"
DO_NOT_TRUNCATE = "do_not_truncate"
class CharSpan(NamedTuple):
"""
Character span in the original string.
Args:
start (`int`): Index of the first character in the original string.
end (`int`): Index of the character following the last character in the original string.
"""
start: int
end: int
class TokenSpan(NamedTuple):
"""
Token span in an encoded string (list of tokens).
Args:
start (`int`): Index of the first token in the span.
end (`int`): Index of the token following the last token in the span.
"""
start: int
end: int
class BatchEncoding(UserDict):
"""
Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and
[`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc).
This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
utility methods to map from word/character space to token space.
Args:
data (`dict`):
Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods ('input_ids',
'attention_mask', etc.).
encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*):
If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character
space to token space the `tokenizers.Encoding` instance or list of instance (for batches) hold this
information.
tensor_type (`Union[None, str, TensorType]`, *optional*):
You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
initialization.
prepend_batch_axis (`bool`, *optional*, defaults to `False`):
Whether or not to add a batch axis when converting to tensors (see `tensor_type` above).
n_sequences (`Optional[int]`, *optional*):
The number of sequences used to generate each sample from the batch encoded in this [`BatchEncoding`].
Can be `None` (unknown), `1` (a single sentence) or `2` (a pair of sentences).
"""
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
tensor_type: Union[None, str, TensorType] = None,
prepend_batch_axis: bool = False,
n_sequences: Optional[int] = None,
):
super().__init__(data)
if isinstance(encoding, EncodingFast):
encoding = [encoding]
self._encodings = encoding
if n_sequences is None and encoding is not None and len(encoding):
n_sequences = encoding[0].n_sequences
self._n_sequences = n_sequences
self.convert_to_tensors(tensor_type=tensor_type,
prepend_batch_axis=prepend_batch_axis)
@property
def n_sequences(self) -> Optional[int]:
"""
`Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
[`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
sentences)
"""
return self._n_sequences
@property
def is_fast(self) -> bool:
"""
`bool`: Indicates whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`]
or not.
"""
return self._encodings is not None
def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
"""
If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
etc.).
If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`.
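Example (a sketch; assumes `tokenizer` is a fast tokenizer):
```python
encoding = tokenizer("hello world")
encoding["input_ids"]  # list of token ids
encoding[0]            # backend `tokenizers.Encoding` of the first batch item
```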
"""
if isinstance(item, str):
return self.data[item]
elif self._encodings is not None:
return self._encodings[item]
else:
raise KeyError(
"Indexing with integers (to access backend Encoding for a given batch index) "
"is not available when using Python based tokenizers"
)
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def __getstate__(self):
return {"data": self.data, "encodings": self._encodings}
def __setstate__(self, state):
if "data" in state:
self.data = state["data"]
if "encodings" in state:
self._encodings = state["encodings"]
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
# After this point:
# Extended properties and methods only available for fast (Rust-based) tokenizers
# provided by HuggingFace tokenizers library.
@property
def encodings(self) -> Optional[List[EncodingFast]]:
"""
`Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns `None`
if the input was tokenized through a Python (i.e., not fast) tokenizer.
"""
return self._encodings
def tokens(self, batch_index: int = 0) -> List[str]:
"""
Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to
integer indices) at a given batch index (only works for the output of a fast tokenizer).
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[str]`: The list of tokens at that index.
"""
if not self._encodings:
raise ValueError(
"tokens() is not available when using Python-based tokenizers")
return self._encodings[batch_index].tokens
def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to the id of their original sentences:
- `None` for special tokens added around or between sequences,
- `0` for tokens corresponding to words in the first sequence,
- `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
encoded.
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
sequence.
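Example (a sketch; exact values depend on the tokenizer, shown here for a BERT-like fast tokenizer):
```python
encoding = tokenizer("What is it?", "A question.")
encoding.sequence_ids()
# -> e.g. [None, 0, 0, 0, 0, None, 1, 1, 1, None]
```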
"""
if not self._encodings:
raise ValueError(
"sequence_ids() is not available when using Python-based tokenizers")
return self._encodings[batch_index].sequence_ids
def words(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
(several tokens will be mapped to the same word index if they are parts of that word).
"""
if not self._encodings:
raise ValueError(
"words() is not available when using Python-based tokenizers")
warnings.warn(
"`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
"but more self-explanatory `BatchEncoding.word_ids()` property.",
FutureWarning,
)
return self.word_ids(batch_index)
def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
Args:
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
Returns:
`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
(several tokens will be mapped to the same word index if they are parts of that word).
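Example (a sketch; exact values depend on the tokenizer's subword splits):
```python
encoding = tokenizer("tokenization matters")
encoding.word_ids()
# -> e.g. [None, 0, 0, 1, None]  ("token" and "##ization" share word index 0)
```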
"""
if not self._encodings:
raise ValueError(
"word_ids() is not available when using Python-based tokenizers")
return self._encodings[batch_index].word_ids
def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
"""
Get the index of the sequence represented by the given token. In the general use case, this method returns `0`
for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair.
Can be called as:
- `self.token_to_sequence(token_index)` if batch size is 1
- `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
words are defined by the user). In this case it makes it easy to associate encoded tokens with the provided
tokenized words.
Args:
batch_or_token_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence.
token_index (`int`, *optional*):
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
sequence.
Returns:
`int`: Index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError(
"token_to_sequence() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
# `_batch_size` / `_seq_len` were never defined on `BatchEncoding`;
# derive the sizes from the backend encodings to support negative indices
if batch_index < 0:
batch_index = len(self._encodings) + batch_index
if token_index < 0:
token_index = len(self._encodings[batch_index].ids) + token_index
return self._encodings[batch_index].token_to_sequence(token_index)
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
"""
Get the index of the word corresponding to (i.e. comprising) an encoded token in a sequence of the batch.
Can be called as:
- `self.token_to_word(token_index)` if batch size is 1
- `self.token_to_word(batch_index, token_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
words are defined by the user). In this case it makes it easy to associate encoded tokens with the provided
tokenized words.
Args:
batch_or_token_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence.
token_index (`int`, *optional*):
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
sequence.
Returns:
`int`: Index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError(
"token_to_word() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
# `_batch_size` / `_seq_len` were never defined on `BatchEncoding`;
# derive the sizes from the backend encodings to support negative indices
if batch_index < 0:
batch_index = len(self._encodings) + batch_index
if token_index < 0:
token_index = len(self._encodings[batch_index].ids) + token_index
return self._encodings[batch_index].token_to_word(token_index)
def word_to_tokens(
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
) -> Optional[TokenSpan]:
"""
Get the encoded token span corresponding to a word in a sequence of the batch.
Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with:
- **start** -- Index of the first token.
- **end** -- Index of the token following the last token.
Can be called as:
- `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1
- `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
words.
Args:
batch_or_word_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the word in the sequence.
word_index (`int`, *optional*):
If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
sequence.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided word index belongs to.
Returns:
Optional [`~tokenization_utils_base.TokenSpan`]: Span of tokens in the encoded sequence. Returns `None` if
no tokens correspond to the word.
"""
if not self._encodings:
raise ValueError(
"word_to_tokens() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
# `_batch_size` / `_seq_len` were never defined on `BatchEncoding`;
# derive the sizes from the backend encodings to support negative indices
if batch_index < 0:
batch_index = len(self._encodings) + batch_index
if word_index < 0:
word_index = len(self._encodings[batch_index].ids) + word_index
span = self._encodings[batch_index].word_to_tokens(
word_index, sequence_index)
return TokenSpan(*span) if span is not None else None
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
"""
Get the character span corresponding to an encoded token in a sequence of the batch.
Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with:
- **start** -- Index of the first character in the original string associated to the token.
- **end** -- Index of the character following the last character in the original string associated to the
token.
Can be called as:
- `self.token_to_chars(token_index)` if batch size is 1
- `self.token_to_chars(batch_index, token_index)` if batch size is greater than 1
Args:
batch_or_token_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence.
token_index (`int`, *optional*):
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
sequence.
Returns:
[`~tokenization_utils_base.CharSpan`]: Span of characters in the original string.
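Example (a sketch; exact spans depend on the tokenizer):
```python
encoding = tokenizer("hello world")
span = encoding.token_to_chars(1)  # e.g. CharSpan(start=0, end=5) for "hello"
"hello world"[span.start:span.end]  # -> "hello"
```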
"""
if not self._encodings:
raise ValueError(
"token_to_chars() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
def char_to_token(
self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
) -> int:
"""
Get the index of the token in the encoded output comprising a character in the original string for a sequence
of the batch.
Can be called as:
- `self.char_to_token(char_index)` if batch size is 1
- `self.char_to_token(batch_index, char_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
words.
Args:
batch_or_char_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the character in the original string.
char_index (`int`, *optional*):
If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
original string.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided character index belongs to.
Returns:
`int`: Index of the token.
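Example (a sketch; exact indices depend on the tokenizer):
```python
encoding = tokenizer("hello world")
encoding.char_to_token(6)  # token covering the "w" in "world", e.g. 2
```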
"""
if not self._encodings:
raise ValueError(
"char_to_token() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_token(char_index, sequence_index)
def word_to_chars(
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
) -> CharSpan:
"""
Get the character span in the original string corresponding to given word in a sequence of the batch.
Character spans are returned as a CharSpan NamedTuple with:
- start: index of the first character in the original string
- end: index of the character following the last character in the original string
Can be called as:
- `self.word_to_chars(word_index)` if batch size is 1
- `self.word_to_chars(batch_index, word_index)` if batch size is greater than 1
Args:
batch_or_word_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the word in the sequence.
word_index (`int`, *optional*):
If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
sequence.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided word index belongs to.
Returns:
`CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. `CharSpan`
is a NamedTuple with:
- start: index of the first character associated to the token in the original string
- end: index of the character following the last character associated to the token in the original
string
"""
if not self._encodings:
raise ValueError(
"word_to_chars() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index)))
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int:
"""
Get the word in the original string corresponding to a character in the original string of a sequence of the
batch.
Can be called as:
- `self.char_to_word(char_index)` if batch size is 1
- `self.char_to_word(batch_index, char_index)` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
words.
Args:
batch_or_char_index (`int`):
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the character in the original string.
char_index (`int`, *optional*):
If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
original string.
sequence_index (`int`, *optional*, defaults to 0):
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
or 1) the provided character index belongs to.
Returns:
`int` or `List[int]`: Index or indices of the word(s) corresponding to the given character(s) in the original string.
"""
if not self._encodings:
raise ValueError(
"char_to_word() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_word(char_index, sequence_index)
def convert_to_tensors(
self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
):
"""
Convert the inner content to tensors.
Args:
tensor_type (`str` or [`~file_utils.TensorType`], *optional*):
The type of tensors to use. If `str`, should be one of the values of the enum
[`~file_utils.TensorType`]. If `None`, no modification is done.
prepend_batch_axis (`bool`, *optional*, defaults to `False`):
Whether or not to add the batch dimension during the conversion.
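Example (a sketch; `"pt"` requires PyTorch to be installed):
```python
encoding = tokenizer(["hello world"], padding=True)
encoding.convert_to_tensors("pt")  # converts in place and returns self
encoding["input_ids"].shape  # -> torch.Size([1, sequence_length])
```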
"""
if tensor_type is None:
return self
# Convert to TensorType
if not isinstance(tensor_type, TensorType):
tensor_type = TensorType(tensor_type)
# Get a function reference for the correct framework
if tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError(
"Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
import torch
as_tensor = torch.tensor
is_tensor = torch.is_tensor
else:
as_tensor = np.asarray
is_tensor = _is_numpy
# (mfuntowicz: This code is unreachable)
# else:
# raise ImportError(
# f"Unable to convert output to tensors format {tensor_type}"
# )
# Do the tensor conversion in batch
for key, value in self.items():
try:
if prepend_batch_axis:
value = [value]
if not is_tensor(value):
tensor = as_tensor(value)
# Removing this for now in favor of controlling the shape with `prepend_batch_axis`
# # at-least2d
# if tensor.ndim > 2:
# tensor = tensor.squeeze(0)
# elif tensor.ndim < 2:
# tensor = tensor[None, :]
self[key] = tensor
except: # noqa E722
if key == "overflowing_tokens":
raise ValueError(
"Unable to create tensor returning overflowing tokens of different lengths. "
"Please see if a fast version of this tokenizer is available to have this feature available."
)
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length."
)
return self
@torch_required
def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding":
"""
Send all values to device by calling `v.to(device)` (PyTorch only).
Args:
device (`str` or `torch.device`): The device to put the tensors on.
Returns:
[`BatchEncoding`]: The same instance after modification.
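Example (a sketch; assumes PyTorch and a CUDA device are available):
```python
batch = tokenizer(["hello world"], return_tensors="pt")
batch = batch.to("cuda:0")  # moves all tensor values to the GPU
```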
"""
# This check catches things like APEX blindly calling "to" on all inputs to a module
# Otherwise it passes the casts down and casts the LongTensor containing the token idxs
# into a HalfTensor
if isinstance(device, str) or _is_torch_device(device) or isinstance(device, int):
self.data = {k: v.to(device=device) for k, v in self.data.items()}
else:
logger.warning(
f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.")
return self
class SpecialTokensMixin:
"""
A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to
special tokens. In particular, this class holds the attributes which can be used to directly access these special
tokens in a model-independent manner and allows setting and updating the special tokens.
Args:
bos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the beginning of a sentence.
eos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the end of a sentence.
unk_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing an out-of-vocabulary token.
sep_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token separating two different sentences in the same input (used by BERT for instance).
pad_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token used to make arrays of tokens the same size for batching purposes. It will then be ignored by
attention mechanisms or loss computation.
cls_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the class of the input (used by BERT for instance).
mask_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT).
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens.
"""
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(self, verbose=True, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
self.verbose = verbose
# We directly set the hidden value to allow initialization with special tokens
# which are not yet in the vocabulary. Necessary for serialization/de-serialization
# TODO clean this up at some point (probably by switching to fast tokenizers)
for key, value in kwargs.items():
if value is None:
continue
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
assert all(
isinstance(t, (str, AddedToken)) for t in value
), "One of the tokens is not a string or an AddedToken"
setattr(self, key, value)
elif isinstance(value, (str, AddedToken)):
setattr(self, key, value)
else:
raise TypeError(
f"special token {key} has to be either str or AddedToken but got: {type(value)}")
def sanitize_special_tokens(self) -> int:
"""
Make sure that all the special tokens attributes of the tokenizer (`tokenizer.mask_token`,
`tokenizer.cls_token`, etc.) are in the vocabulary.
Add the missing ones to the vocabulary if needed.
Return:
`int`: The number of tokens added in the vocabulary during the operation.
"""
return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
def add_special_tokens(self, special_tokens_dict: Dict[str, Union[str, AddedToken]]) -> int:
"""
Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
current vocabulary).
Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding
matrix of the model so that its embedding matrix matches the tokenizer.
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- Special tokens are carefully handled by the tokenizer (they are never split).
- You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (for instance
[`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
`'</s>'`).
Args:
special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`):
Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
`sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
assigns the index of the `unk_token` to them).
Returns:
`int`: Number of tokens added to the vocabulary.
Examples:
```python
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2Model.from_pretrained("gpt2")
special_tokens_dict = {"cls_token": "<CLS>"}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print("We have added", num_added_toks, "tokens")
# Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
assert tokenizer.cls_token == "<CLS>"
```"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
if self.verbose:
logger.info(
f"Assigning {value} to the {key} key of the tokenizer")
setattr(self, key, value)
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(
isinstance(t, (str, AddedToken)) for t in value
), f"Tokens {value} for key {key} should all be str or AddedToken instances"
added_tokens += self.add_tokens(value, special_tokens=True)
else:
assert isinstance(
value, (str, AddedToken)
), f"Token {value} for key {key} should be a str or an AddedToken instance"
added_tokens += self.add_tokens([value], special_tokens=True)
return added_tokens
def add_tokens(
self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from length of the current vocabulary.
Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding
matrix of the model so that its embedding matrix matches the tokenizer.
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
Args:
new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`):
Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string
token to let you personalize its behavior: whether this token should only match against a single word,
whether this token should strip all potential whitespaces on the left side, whether this token should
strip all potential whitespaces on the right side, etc.
special_tokens (`bool`, *optional*, defaults to `False`):
Can be used to specify if the token is a special token. This mostly changes the normalization behavior
(special tokens like CLS or [MASK] are usually not lower-cased for instance).
See details for `tokenizers.AddedToken` in HuggingFace tokenizers library.
Returns:
`int`: Number of tokens added to the vocabulary.
Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
print("We have added", num_added_toks, "tokens")
# Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
if not new_tokens:
return 0
if not isinstance(new_tokens, (list, tuple)):
new_tokens = [new_tokens]
return self._add_tokens(new_tokens, special_tokens=special_tokens)
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
raise NotImplementedError
@property
def bos_token(self) -> str:
"""
`str`: Beginning of sentence token. Log an error if used while not having been set.
"""
if self._bos_token is None and self.verbose:
logger.error("Using bos_token, but it is not set yet.")
return None
return str(self._bos_token)
@property
def eos_token(self) -> str:
"""
`str`: End of sentence token. Log an error if used while not having been set.
"""
if self._eos_token is None and self.verbose:
logger.error("Using eos_token, but it is not set yet.")
return None
return str(self._eos_token)
@property
def unk_token(self) -> str:
"""
`str`: Unknown token. Log an error if used while not having been set.
"""
if self._unk_token is None and self.verbose:
logger.error("Using unk_token, but it is not set yet.")
return None
return str(self._unk_token)
@property
def sep_token(self) -> str:
"""
`str`: Separation token, to separate context and query in an input sequence. Log an error if used while not
having been set.
"""
if self._sep_token is None and self.verbose:
logger.error("Using sep_token, but it is not set yet.")
return None
return str(self._sep_token)
@property
def pad_token(self) -> str:
"""
`str`: Padding token. Log an error if used while not having been set.
"""
if self._pad_token is None and self.verbose:
logger.error("Using pad_token, but it is not set yet.")
return None
return str(self._pad_token)
@property
def cls_token(self) -> str:
"""
`str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full
depth of the model. Log an error if used while not having been set.
"""
if self._cls_token is None and self.verbose:
logger.error("Using cls_token, but it is not set yet.")
return None
return str(self._cls_token)
@property
def mask_token(self) -> str:
"""
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
"""
if self._mask_token is None and self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@property
def additional_special_tokens(self) -> List[str]:
"""
`List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been
set.
"""
if self._additional_special_tokens is None and self.verbose:
logger.error(
"Using additional_special_tokens, but it is not set yet.")
return None
return [str(tok) for tok in self._additional_special_tokens]
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
@property
def bos_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not
been set.
"""
if self._bos_token is None:
return None
return self.convert_tokens_to_ids(self.bos_token)
@property
def eos_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been
set.
"""
if self._eos_token is None:
return None
return self.convert_tokens_to_ids(self.eos_token)
@property
def unk_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set.
"""
if self._unk_token is None:
return None
return self.convert_tokens_to_ids(self.unk_token)
@property
def sep_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
sequence. Returns `None` if the token has not been set.
"""
if self._sep_token is None:
return None
return self.convert_tokens_to_ids(self.sep_token)
@property
def pad_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
"""
if self._pad_token is None:
return None
return self.convert_tokens_to_ids(self.pad_token)
@property
def pad_token_type_id(self) -> int:
"""
`int`: Id of the padding token type in the vocabulary.
"""
return self._pad_token_type_id
@property
def cls_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence
leveraging self-attention along the full depth of the model.
Returns `None` if the token has not been set.
"""
if self._cls_token is None:
return None
return self.convert_tokens_to_ids(self.cls_token)
@property
def mask_token_id(self) -> Optional[int]:
"""
`Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
modeling. Returns `None` if the token has not been set.
"""
if self._mask_token is None:
return None
return self.convert_tokens_to_ids(self.mask_token)
@property
def additional_special_tokens_ids(self) -> List[int]:
"""
`List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having
been set.
"""
return self.convert_tokens_to_ids(self.additional_special_tokens)
@bos_token_id.setter
def bos_token_id(self, value):
self._bos_token = self.convert_tokens_to_ids(value)
@eos_token_id.setter
def eos_token_id(self, value):
self._eos_token = self.convert_tokens_to_ids(value)
@unk_token_id.setter
def unk_token_id(self, value):
self._unk_token = self.convert_tokens_to_ids(value)
@sep_token_id.setter
def sep_token_id(self, value):
self._sep_token = self.convert_tokens_to_ids(value)
@pad_token_id.setter
def pad_token_id(self, value):
self._pad_token = self.convert_tokens_to_ids(value)
@cls_token_id.setter
def cls_token_id(self, value):
self._cls_token = self.convert_tokens_to_ids(value)
@mask_token_id.setter
def mask_token_id(self, value):
self._mask_token = self.convert_tokens_to_ids(value)
@additional_special_tokens_ids.setter
def additional_special_tokens_ids(self, values):
self._additional_special_tokens = [
self.convert_tokens_to_ids(value) for value in values]
@property
def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
"""
`Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`,
`unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
Convert potential tokens of `tokenizers.AddedToken` type to string.
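Example (a sketch; values shown for a BERT-like tokenizer):
```python
tokenizer.special_tokens_map
# -> e.g. {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#          'cls_token': '[CLS]', 'mask_token': '[MASK]'}
```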
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = (
type(attr_value)(str(attr_value_sub)
for attr_value_sub in attr_value)
if isinstance(attr_value, (list, tuple))
else str(attr_value)
)
return set_attr
@property
def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
"""
`Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping
special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
special tokens are tokenized.
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self) -> List[str]:
"""
`List[str]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
Convert tokens of `tokenizers.AddedToken` type to string.
"""
all_toks = [str(s) for s in self.all_special_tokens_extended]
return all_toks
@property
def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
"""
`List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class
attributes.
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
special tokens are tokenized.
"""
all_toks = []
set_attr = self.special_tokens_map_extended
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(OrderedDict.fromkeys(all_toks))
return all_toks
@property
def all_special_ids(self) -> List[int]:
"""
`List[int]`: List of the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
ENCODE_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
stride (`int`, *optional*, defaults to 0):
If set to a number along with `max_length`, the overflowing tokens returned when
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
return_token_type_ids (`bool`, *optional*):
Whether to return token type IDs. If left to the default, will return the token type IDs according to
the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are token type IDs?](../glossary#token-type-ids)
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
of returning overflowing tokens.
return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
Whether or not to return special tokens mask information.
return_offsets_mapping (`bool`, *optional*, defaults to `False`):
Whether or not to return `(char_start, char_end)` for each token.
This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using a
Python-based tokenizer, this method will raise `NotImplementedError`.
return_length (`bool`, *optional*, defaults to `False`):
Whether or not to return the lengths of the encoded inputs.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
**kwargs: passed to the `self.tokenize()` method
Return:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
[What are input IDs?](../glossary#input-ids)
- **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
if *"token_type_ids"* is in `self.model_input_names`).
[What are token type IDs?](../glossary#token-type-ids)
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
[What are attention masks?](../glossary#attention-mask)
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
- **length** -- The length of the inputs (when `return_length=True`)
"""
INIT_TOKENIZER_DOCSTRING = r"""
Class attributes (overridden by derived classes)
- **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
vocabulary file required by the model, and as associated values, the filename for saving the associated file
(string).
- **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
associated pretrained vocabulary file.
- **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
or `None` if the model has no maximum input size.
- **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
`short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
with the [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`] method.
- **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
- **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
Should be `'right'` or `'left'`.
- **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
applied. Should be `'right'` or `'left'`.
Args:
model_max_length (`int`, *optional*):
The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
default to VERY_LARGE_INTEGER (`int(1e30)`).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
truncation_side (`str`, *optional*):
The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
model_input_names (`List[string]`, *optional*):
The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
`"attention_mask"`). Default value is picked from the class attribute of the same name.
bos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and
`self.bos_token_id`.
eos_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the end of a sentence. Will be associated to `self.eos_token` and
`self.eos_token_id`.
unk_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and
`self.unk_token_id`.
sep_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token separating two different sentences in the same input (used by BERT for instance). Will be
associated to `self.sep_token` and `self.sep_token_id`.
pad_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token used to make arrays of tokens the same size for batching purposes. It will then be ignored by
attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`.
cls_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing the class of the input (used by BERT for instance). Will be associated to
`self.cls_token` and `self.cls_token_id`.
mask_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT). Will be associated to `self.mask_token` and `self.mask_token_id`.
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the
tokenization process. Will be associated to `self.additional_special_tokens` and
`self.additional_special_tokens_ids`.
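Example (a minimal sketch using a derived class; `bert-base-uncased` serves only as an illustration):
```python
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", padding_side="left")
tokenizer.padding_side  # 'left'
tokenizer.cls_token, tokenizer.cls_token_id  # special tokens are exposed as attributes
```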
"""
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
"""
Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].
Handles shared (mostly boilerplate) methods for those two classes.
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
max_model_input_sizes: Dict[str, Optional[int]] = {}
_auto_class: Optional[str] = None
# first name has to correspond to main model input name
# to make sure `tokenizer.pad(...)` works correctly
model_input_names: List[str] = [
"input_ids", "token_type_ids", "attention_mask"]
padding_side: str = "right"
truncation_side: str = "right"
slow_tokenizer_class = None
def __init__(self, **kwargs):
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = copy.deepcopy(kwargs)
self.name_or_path = kwargs.pop("name_or_path", "")
self._processor_class = kwargs.pop("processor_class", None)
# For backward compatibility we fallback to set model_max_length from max_len if provided
model_max_length = kwargs.pop(
"model_max_length", kwargs.pop("max_len", None))
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it
# is changed.
self.padding_side = kwargs.pop("padding_side", self.padding_side)
if self.padding_side not in ["right", "left"]:
raise ValueError(
f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
)
self.truncation_side = kwargs.pop(
"truncation_side", self.truncation_side)
if self.truncation_side not in ["right", "left"]:
raise ValueError(
f"Padding side should be selected between 'right' and 'left', current value: {self.truncation_side}"
)
self.model_input_names = kwargs.pop(
"model_input_names", self.model_input_names)
self.deprecation_warnings = (
{}
) # Use to store when we have already noticed a deprecation warning (avoid overlogging).
super().__init__(**kwargs)
@property
def max_len_single_sentence(self) -> int:
"""
`int`: The maximum length of a sentence that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@property
def max_len_sentences_pair(self) -> int:
"""
`int`: The maximum combined length of a pair of sentences that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> None:
# For backward compatibility, allow trying to set 'max_len_single_sentence'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
if not self.deprecation_warnings.get("max_len_single_sentence", False):
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
self.deprecation_warnings["max_len_single_sentence"] = True
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> None:
# For backward compatibility, allow trying to set 'max_len_sentences_pair'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
if not self.deprecation_warnings.get("max_len_sentences_pair", False):
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
self.deprecation_warnings["max_len_sentences_pair"] = True
else:
raise ValueError(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
def _set_processor_class(self, processor_class: str):
"""Sets processor class as an attribute."""
self._processor_class = processor_class
def __repr__(self) -> str:
return (
f"{'PreTrainedTokenizerFast' if self.is_fast else 'PreTrainedTokenizer'}(name_or_path='{self.name_or_path}', "
f"vocab_size={self.vocab_size}, model_max_len={self.model_max_length}, is_fast={self.is_fast}, "
f"padding_side='{self.padding_side}', truncation_side='{self.truncation_side}', special_tokens={self.special_tokens_map_extended})"
)
def get_vocab(self) -> Dict[str, int]:
"""
Returns the vocabulary as a dictionary of token to index.
`tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
vocab.
Returns:
`Dict[str, int]`: The vocabulary.
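Example (sketch; assumes a concrete subclass implementing this method, and that "hello" is in the vocab):
```python
vocab = tokenizer.get_vocab()
assert vocab["hello"] == tokenizer.convert_tokens_to_ids("hello")
```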
"""
raise NotImplementedError()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
r"""
Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined
tokenizer.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
`./my_model_directory/vocab.txt`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a file
exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
local_files_only (`bool`, *optional*, defaults to `False`):
Whether or not to only rely on local files and not to attempt to download any files.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
inputs (additional positional arguments, *optional*):
Will be passed along to the Tokenizer `__init__` method.
kwargs (additional keyword arguments, *optional*):
Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`,
`eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
`additional_special_tokens`. See parameters in the `__init__` for more details.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
# We can't directly instantiate the base class *PreTrainedTokenizerBase*, so the examples below use a derived class: BertTokenizer
# Download vocabulary from huggingface.co and cache.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Download vocabulary from huggingface.co (user-uploaded) and cache.
tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
# If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
tokenizer = BertTokenizer.from_pretrained("./test/saved_model/")
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", unk_token="<unk>")
# You should make sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead.
assert tokenizer.unk_token == "<unk>"
```"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
subfolder = kwargs.pop("subfolder", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "tokenizer",
"from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
vocab_files = {}
init_configuration = {}
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
if len(cls.vocab_files_names) > 1:
raise ValueError(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
"supported for this tokenizer. Use a model identifier or the path to a directory instead."
)
warnings.warn(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and "
"won't be possible anymore in v5. Use a model identifier or the path to a directory instead.",
FutureWarning,
)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
else:
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
additional_files_names = {
"added_tokens_file": ADDED_TOKENS_FILE,
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
}
vocab_files_target = {
**cls.vocab_files_names, **additional_files_names}
if "tokenizer_file" in vocab_files_target:
# Try to get the tokenizer config to see if there are versioned tokenizer files.
fast_tokenizer_file = FULL_TOKENIZER_FILE
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path,
TOKENIZER_CONFIG_FILE,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
use_auth_token=use_auth_token,
revision=revision,
local_files_only=local_files_only,
)
if resolved_config_file is not None:
with open(resolved_config_file, encoding="utf-8") as reader:
tokenizer_config = json.load(reader)
if "fast_tokenizer_files" in tokenizer_config:
fast_tokenizer_file = get_fast_tokenizer_file(
tokenizer_config["fast_tokenizer_files"])
vocab_files_target["tokenizer_file"] = fast_tokenizer_file
# Look for the tokenizer files
for file_id, file_name in vocab_files_target.items():
if os.path.isdir(pretrained_model_name_or_path):
if subfolder is not None:
full_file_name = os.path.join(
pretrained_model_name_or_path, subfolder, file_name)
else:
full_file_name = os.path.join(
pretrained_model_name_or_path, file_name)
if not os.path.exists(full_file_name):
logger.info(
f"Didn't find file {full_file_name}. We won't load it.")
full_file_name = None
else:
full_file_name = hf_bucket_url(
pretrained_model_name_or_path,
filename=file_name,
subfolder=subfolder,
revision=revision,
mirror=None,
)
vocab_files[file_id] = full_file_name
# Get files from url, cache, or disk depending on the case
resolved_vocab_files = {}
unresolved_files = []
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
try:
resolved_vocab_files[file_id] = cached_path(
file_path,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except FileNotFoundError as error:
if local_files_only:
unresolved_files.append(file_id)
else:
raise error
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to "
"pass a token having permission to this repo with `use_auth_token` or log in with "
"`huggingface-cli login` and pass `use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists "
"for this model name. Check the model page at "
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
)
except EntryNotFoundError:
logger.debug(
f"{pretrained_model_name_or_path} does not contain a file named {file_path}.")
resolved_vocab_files[file_id] = None
except HTTPError as err:
if "404 Client Error" in str(err):
logger.debug(
f"Connection problem to access {file_path}.")
resolved_vocab_files[file_id] = None
else:
raise err
if len(unresolved_files) > 0:
logger.info(
f"Can't load following files from cache: {unresolved_files} and cannot check if these "
"files are necessary for the tokenizer to operate."
)
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
raise EnvironmentError(
f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing all relevant files for a {cls.__name__} tokenizer."
)
for file_id, file_path in vocab_files.items():
if file_id not in resolved_vocab_files:
continue
if file_path == resolved_vocab_files[file_id]:
logger.info(f"loading file {file_path}")
else:
logger.info(
f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
return cls._from_pretrained(
resolved_vocab_files,
pretrained_model_name_or_path,
init_configuration,
*init_inputs,
use_auth_token=use_auth_token,
cache_dir=cache_dir,
**kwargs,
)
@classmethod
def _from_pretrained(
cls,
resolved_vocab_files,
pretrained_model_name_or_path,
init_configuration,
*init_inputs,
use_auth_token=None,
cache_dir=None,
**kwargs
):
# We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
# file or if `from_slow` is set to True.
from_slow = kwargs.get("from_slow", False)
has_tokenizer_file = resolved_vocab_files.get(
"tokenizer_file", None) is not None
if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None:
slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
copy.deepcopy(resolved_vocab_files),
pretrained_model_name_or_path,
copy.deepcopy(init_configuration),
*init_inputs,
**(copy.deepcopy(kwargs)),
)
else:
slow_tokenizer = None
# Prepare tokenizer initialization kwargs
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop(
"tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
# First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers.
config_tokenizer_class = init_kwargs.get("tokenizer_class")
init_kwargs.pop("tokenizer_class", None)
init_kwargs.pop("auto_map", None)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
config_tokenizer_class = None
init_kwargs = init_configuration
if config_tokenizer_class is None:
from .models.auto.configuration_auto import AutoConfig # tests_ignore
# Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
try:
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path,
use_auth_token=use_auth_token,
cache_dir=cache_dir,
)
config_tokenizer_class = config.tokenizer_class
except (OSError, ValueError, KeyError):
# skip if an error occurred.
config = None
if config_tokenizer_class is None:
# Third attempt. If we have not yet found the original type of the tokenizer
# we are loading, see if we can infer it from the type of the configuration file.
from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore
if hasattr(config, "model_type"):
model_type = config.model_type
else:
# Fallback: use pattern matching on the string.
model_type = None
for pattern in TOKENIZER_MAPPING_NAMES.keys():
if pattern in str(pretrained_model_name_or_path):
model_type = pattern
break
if model_type is not None:
config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get(
model_type, (None, None)
)
if config_tokenizer_class is None:
config_tokenizer_class = config_tokenizer_class_fast
if config_tokenizer_class is not None:
if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
logger.warning(
"The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. "
"It may result in unexpected tokenization. \n"
f"The tokenizer class you load from this checkpoint is '{config_tokenizer_class}'. \n"
f"The class this function is called from is '{cls.__name__}'."
)
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Convert AddedTokens serialized as dict to class instances
def convert_added_tokens(obj: Union[AddedToken, Any]):
if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
obj.pop("__type")
return AddedToken(**obj)
elif isinstance(obj, (list, tuple)):
return list(convert_added_tokens(o) for o in obj)
elif isinstance(obj, dict):
return {k: convert_added_tokens(v) for k, v in obj.items()}
return obj
init_kwargs = convert_added_tokens(init_kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
# won't index sequences longer than the number of positional embeddings
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get(
"model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
if slow_tokenizer is not None:
init_kwargs["__slow_tokenizer"] = slow_tokenizer
init_kwargs["name_or_path"] = pretrained_model_name_or_path
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
# Removed: Now done at the base class level
# tokenizer.init_inputs = init_inputs
# tokenizer.init_kwargs = init_kwargs
# If there is a complementary special token map, load it
special_tokens_map_file = resolved_vocab_files.pop(
"special_tokens_map_file", None)
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if key in kwargs and kwargs[key]:
# This value has already been redefined by the kwargs
# We keep this new value and ignore the one stored in the special_tokens_map_file
continue
if isinstance(value, dict):
value = AddedToken(**value)
elif isinstance(value, list):
value = [AddedToken(
**token) if isinstance(token, dict) else token for token in value]
setattr(tokenizer, key, value)
# Add supplementary tokens.
special_tokens = tokenizer.all_special_tokens
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
# Sort added tokens by index
added_tok_encoder_sorted = list(
sorted(added_tok_encoder.items(), key=lambda x: x[1]))
for token, index in added_tok_encoder_sorted:
if has_tokenizer_file and index != len(tokenizer) and tokenizer.convert_tokens_to_ids(token) != index:
# Tokenizer fast: added token needs to either be in the vocabulary with the proper index or the
# index is the current length of the tokenizer (not in vocabulary)
raise ValueError(
f"Wrong index found for {token}: should be {tokenizer.convert_tokens_to_ids(token)} but found "
f"{index}."
)
elif not has_tokenizer_file and index != len(tokenizer):
# Tokenizer slow: added token cannot already be in the vocabulary so its index needs to be the
# current length of the tokenizer.
raise ValueError(
f"Non-consecutive added token '{token}' found. "
f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
)
# Safe to call on a tokenizer fast even if token already there.
tokenizer.add_tokens(
token, special_tokens=bool(token in special_tokens))
# Check all our special tokens are registered as "no split" token (we don't cut them) and are in the vocab
added_tokens = tokenizer.sanitize_special_tokens()
if added_tokens:
logger.warning_advice(
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained."
)
return tokenizer
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
legacy_format: Optional[bool] = None,
filename_prefix: Optional[str] = None,
push_to_hub: bool = False,
**kwargs,
) -> Tuple[str]:
"""
Save the full tokenizer state.
This method makes sure the full tokenizer can then be re-loaded using the
[`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.
Warning: This won't save modifications you may have applied to the tokenizer after instantiation (for
instance, modifying `tokenizer.do_lower_case` after creation).
Args:
save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
legacy_format (`bool`, *optional*):
Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate
added_tokens files.
If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
"slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
loaded in the corresponding "slow" tokenizer.
If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a
`ValueError` is raised.
filename_prefix (`str`, *optional*):
A prefix to add to the names of the files saved by the tokenizer.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
<Tip warning={true}>
Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
folder. Pass along `temp_dir=True` to use a temporary directory instead.
</Tip>
Returns:
A tuple of `str`: The files saved.
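Example (a minimal round-trip sketch; `BertTokenizer` and the paths are used purely for illustration):
```python
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
saved_files = tokenizer.save_pretrained("./my_tokenizer")  # paths of the files written
reloaded = BertTokenizer.from_pretrained("./my_tokenizer")
```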
"""
if os.path.isfile(save_directory):
logger.error(
f"Provided path ({save_directory}) should be a directory, not a file")
return
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
special_tokens_map_file = os.path.join(
save_directory, (filename_prefix +
"-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
)
tokenizer_config_file = os.path.join(
save_directory, (filename_prefix +
"-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
# Sanitize AddedTokens
def convert_added_tokens(obj: Union[AddedToken, Any], add_type_field=True):
if isinstance(obj, AddedToken):
out = obj.__getstate__()
if add_type_field:
out["__type"] = "AddedToken"
return out
elif isinstance(obj, (list, tuple)):
return list(convert_added_tokens(o, add_type_field=add_type_field) for o in obj)
elif isinstance(obj, dict):
return {k: convert_added_tokens(v, add_type_field=add_type_field) for k, v in obj.items()}
return obj
# add_type_field=True to allow dicts in the kwargs / differentiate from AddedToken serialization
tokenizer_config = convert_added_tokens(
tokenizer_config, add_type_field=True)
# Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
tokenizer_class = self.__class__.__name__
# Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast`
if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast":
tokenizer_class = tokenizer_class[:-4]
tokenizer_config["tokenizer_class"] = tokenizer_class
if getattr(self, "_auto_map", None) is not None:
tokenizer_config["auto_map"] = self._auto_map
if getattr(self, "_processor_class", None) is not None:
tokenizer_config["processor_class"] = self._processor_class
# If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=tokenizer_config)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
logger.info(f"tokenizer config file saved in {tokenizer_config_file}")
# Sanitize AddedTokens in special_tokens_map
write_dict = convert_added_tokens(
self.special_tokens_map_extended, add_type_field=False)
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
f.write(json.dumps(write_dict, ensure_ascii=False))
logger.info(f"Special tokens file saved in {special_tokens_map_file}")
file_names = (tokenizer_config_file, special_tokens_map_file)
save_files = self._save_pretrained(
save_directory=save_directory,
file_names=file_names,
legacy_format=legacy_format,
filename_prefix=filename_prefix,
)
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Tokenizer pushed to the hub in this commit: {url}")
return save_files
def _save_pretrained(
self,
save_directory: Union[str, os.PathLike],
file_names: Tuple[str],
legacy_format: Optional[bool] = None,
filename_prefix: Optional[str] = None,
) -> Tuple[str]:
"""
Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
Fast tokenizers can also be saved in a single JSON file containing {config + vocab + added-tokens} using the
specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`] method.
"""
if legacy_format is False:
raise ValueError(
"Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format."
)
save_directory = str(save_directory)
added_tokens_file = os.path.join(
save_directory, (filename_prefix +
"-" if filename_prefix else "") + ADDED_TOKENS_FILE
)
added_vocab = self.get_added_vocab()
if added_vocab:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(added_vocab, ensure_ascii=False)
f.write(out_str)
logger.info(f"added tokens file saved in {added_tokens_file}")
vocab_files = self.save_vocabulary(
save_directory, filename_prefix=filename_prefix)
return file_names + vocab_files + (added_tokens_file,)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
"""
Save only the vocabulary of the tokenizer (vocabulary + added tokens).
This method won't save the configuration and special token mappings of the tokenizer. Use
[`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the names of the saved files.
Returns:
`Tuple(str)`: Paths to the files saved.
"""
raise NotImplementedError
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
"""
Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.
Args:
text (`str`):
The sequence to be encoded.
pair (`str`, *optional*):
A second sequence to be encoded with the first.
add_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to add the special tokens associated with the corresponding model.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific encode method. See details in
[`~PreTrainedTokenizerBase.__call__`]
Returns:
`List[str]`: The list of tokens.
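Example (sketch; the exact tokens depend on the tokenizer's vocabulary and algorithm):
```python
tokens = tokenizer.tokenize("Hello world!")
# e.g. ['hello', 'world', '!'] for an uncased WordPiece tokenizer
```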
"""
raise NotImplementedError
@add_end_docstrings(
ENCODE_KWARGS_DOCSTRING,
"""
**kwargs: Passed along to the `.tokenize()` method.
""",
"""
Returns:
`List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.
""",
)
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput,
PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> List[int]:
"""
Converts a string to a sequence of ids (integers), using the tokenizer and vocabulary.
Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.
Args:
text (`str`, `List[str]` or `List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
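Example (a sketch illustrating the equivalence stated above):
```python
ids = tokenizer.encode("Hello world!", add_special_tokens=False)
assert ids == tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world!"))
```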
"""
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def num_special_tokens_to_add(self, pair: bool = False) -> int:
raise NotImplementedError
def _get_padding_truncation_strategies(
self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
):
"""
Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
and pad_to_max_length) and behaviors.
"""
old_truncation_strategy = kwargs.pop(
"truncation_strategy", "do_not_truncate")
old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
# Backward compatibility for previous behavior, maybe we should deprecate it:
# If you only set max_length, it activates truncation for max_length
if max_length is not None and padding is False and truncation is False:
if verbose:
if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False):
logger.warning(
"Truncation was not explicitly activated but `max_length` is provided a specific value, "
"please use `truncation=True` to explicitly truncate examples to max length. "
"Defaulting to 'longest_first' truncation strategy. "
"If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy "
"more precisely by providing a specific strategy to `truncation`."
)
self.deprecation_warnings["Truncation-not-explicitly-activated"] = True
truncation = "longest_first"
# Get padding strategy
if padding is False and old_pad_to_max_length:
if verbose:
warnings.warn(
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
"maximal input size of the model (e.g. 512 for Bert).",
FutureWarning,
)
if max_length is None:
padding_strategy = PaddingStrategy.LONGEST
else:
padding_strategy = PaddingStrategy.MAX_LENGTH
elif padding is not False:
if padding is True:
if verbose:
if max_length is not None and (truncation is False or truncation == "do_not_truncate"):
warnings.warn(
"`max_length` is ignored when `padding`=`True` and there is no truncation strategy. "
"To pad to max length, use `padding='max_length'`."
)
if old_pad_to_max_length is not False:
warnings.warn(
"Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.")
# Default to pad to the longest sequence in the batch
padding_strategy = PaddingStrategy.LONGEST
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
elif isinstance(padding, PaddingStrategy):
padding_strategy = padding
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
# Get truncation strategy
if truncation is False and old_truncation_strategy != "do_not_truncate":
if verbose:
warnings.warn(
"The `truncation_strategy` argument is deprecated and will be removed in a future version, "
"use `truncation=True` to truncate examples to a max length. You can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
"maximal input size of the model (e.g. 512 for Bert). "
" If you have pairs of inputs, you can give a specific truncation strategy selected among "
"`truncation='only_first'` (will only truncate the first sentence in the pairs) "
"`truncation='only_second'` (will only truncate the second sentence in the pairs) "
"or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).",
FutureWarning,
)
truncation_strategy = TruncationStrategy(old_truncation_strategy)
elif truncation is not False:
if truncation is True:
truncation_strategy = (
TruncationStrategy.LONGEST_FIRST
) # Default to truncate the longest sequences in pairs of inputs
elif not isinstance(truncation, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation)
elif isinstance(truncation, TruncationStrategy):
truncation_strategy = truncation
else:
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
if self.model_max_length > LARGE_INTEGER:
if verbose:
if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False):
logger.warning(
"Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no padding."
)
self.deprecation_warnings["Asking-to-pad-to-max_length"] = True
padding_strategy = PaddingStrategy.DO_NOT_PAD
else:
max_length = self.model_max_length
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
if self.model_max_length > LARGE_INTEGER:
if verbose:
if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False):
logger.warning(
"Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no truncation."
)
self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
else:
max_length = self.model_max_length
# Test if we have a padding token
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
raise ValueError(
"Asking to pad but the tokenizer does not have a padding token. "
"Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
)
# Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
if (
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
and padding_strategy != PaddingStrategy.DO_NOT_PAD
and pad_to_multiple_of is not None
and max_length is not None
and (max_length % pad_to_multiple_of != 0)
):
raise ValueError(
f"Truncation and padding are both activated but "
f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
)
return padding_strategy, truncation_strategy, max_length, kwargs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
text_pair: Optional[Union[TextInput, PreTokenizedInput,
List[TextInput], List[PreTokenizedInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
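Example (sketch; `return_tensors="pt"` assumes PyTorch is installed):
```python
enc = tokenizer(["A short sentence.", "A somewhat longer second sentence."], padding=True, return_tensors="pt")
enc["input_ids"].shape  # (2, length of the longest sequence in the batch)
```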
"""
# Input type checking for clearer error
def _is_valid_text_input(t):
if isinstance(t, str):
# Strings are fine
return True
elif isinstance(t, (list, tuple)):
# List are fine as long as they are...
if len(t) == 0:
# ... empty
return True
elif isinstance(t[0], str):
# ... list of strings
return True
elif isinstance(t[0], (list, tuple)):
# ... list with an empty list or with a list of strings
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if not _is_valid_text_input(text):
raise ValueError(
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
if text_pair is not None and not _is_valid_text_input(text_pair):
raise ValueError(
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
if is_split_into_words:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(
text[0], (list, tuple))
else:
is_batched = isinstance(text, (list, tuple))
if is_batched:
if isinstance(text_pair, str):
raise TypeError(
"when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as `text`."
)
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}."
)
batch_text_or_text_pairs = list(
zip(text, text_pair)) if text_pair is not None else text
return self.batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput,
PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
text (`str`, `List[str]` or `List[int]` (the latter only for not-fast tokenizers)):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput,
PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded. This can be a list of
string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
details in `encode_plus`).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
List[BatchEncoding],
Dict[str, EncodedInput],
Dict[str, List[EncodedInput]],
List[Dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
Pad a single encoded input or a batch of encoded inputs up to a predefined length or to the max sequence length
in the batch.
Padding side (left/right) and padding token ids are defined at the tokenizer level (with `self.padding_side`,
`self.pad_token_id` and `self.pad_token_type_id`)
<Tip>
If the `encoded_inputs` passed are a dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
PyTorch tensors, however, you will lose the specific device of your tensors.
</Tip>
Args:
encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):
Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function.
Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see
the note above for the return type.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set, will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
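Example (sketch of using `pad` as a collate function; `return_tensors="pt"` assumes PyTorch is installed):
```python
features = [tokenizer("short"), tokenizer("a noticeably longer input sentence")]
batch = tokenizer.pad(features, padding=True, return_tensors="pt")
batch["input_ids"].shape  # (2, length of the longest sequence)
```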
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)):
encoded_inputs = {key: [
example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
# The model's main input name, usually `input_ids`, has to be passed for padding
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(
"You should supply an encoding or a list of encodings to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
)
required_input = encoded_inputs[self.model_input_names[0]]
if not required_input:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
for item in required_input:
if len(item) != 0:
first_element = item[0]
break
# At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
if not isinstance(first_element, (int, list, tuple)):
if is_torch_available() and _is_torch(first_element):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
# Convert padding_strategy in PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and not isinstance(required_input[0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
assert all(
len(v) == batch_size for v in encoded_inputs.values()
), "Some items in the output dictionary have a different batch size than others."
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in required_input)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = dict((k, v[i]) for k, v in encoded_inputs.items())
outputs = self._pad(
inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
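    # Illustrative sketch (not part of the original API surface): because `pad` accepts a list of
    # feature dicts and returns a dict of equal-length lists, it can be used directly as a
    # DataLoader collate_fn, as the comment at the top of the method notes. `tokenizer` below is
    # a hypothetical instance of a concrete subclass:
    #
    #   from torch.utils.data import DataLoader
    #   loader = DataLoader(
    #       dataset,  # yields dicts like {"input_ids": [...], "attention_mask": [...]}
    #       batch_size=8,
    #       collate_fn=lambda features: tokenizer.pad(features, padding=True, return_tensors="pt"),
    #   )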
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create the token type IDs corresponding to the sequences passed. [What are token type
IDs?](../glossary#token-type-ids)
Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (`List[int]`): The first tokenized sequence.
token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
Returns:
`List[int]`: The token type ids.
"""
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
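    # For example, with this base implementation (`tok` being a hypothetical instance):
    #   tok.create_token_type_ids_from_sequences([5, 6])          -> [0, 0]
    #   tok.create_token_type_ids_from_sequences([5, 6], [7, 8])  -> [0, 0, 1, 1]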
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens.
This implementation does not add special tokens and this method should be overridden in a subclass.
Args:
token_ids_0 (`List[int]`): The first tokenized sequence.
token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
Returns:
`List[int]`: The model input with special tokens.
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
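    # For example, this base implementation is plain concatenation (hypothetical `tok`):
    #   tok.build_inputs_with_special_tokens([5, 6], [7, 8])  -> [5, 6, 7, 8]
    # Subclasses typically wrap the ids in model-specific special tokens instead.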
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs
) -> BatchEncoding:
"""
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user-defined stride) for overflowing tokens. Note that for *pair_ids*
different from `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
overflowing tokens. Such a combination of arguments will raise an error.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
if return_token_type_ids and not add_special_tokens:
raise ValueError(
"Asking to return token_type_ids while setting add_special_tokens to False "
"results in an undefined behavior. Please set add_special_tokens to True or "
"set return_token_type_ids to None."
)
if (
return_overflowing_tokens
and truncation_strategy == TruncationStrategy.LONGEST_FIRST
and pair_ids is not None
):
raise ValueError(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Compute the total size of the returned encodings
total_len = len_ids + len_pair_ids + \
(self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
# Truncation: Handle max sequence length
overflowing_tokens = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(
ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * \
len(ids) + ([0] * len(pair_ids) if pair else [])
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(
ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
self._eventual_warn_about_too_long_sequence(
encoded_inputs["input_ids"], max_length, verbose)
# Padding
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
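    # A minimal usage sketch (hypothetical `tok` instance; the exact ids depend on the tokenizer),
    # chaining `tokenize` and `convert_tokens_to_ids` as described in the docstring above:
    #   ids = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
    #   enc = tok.prepare_for_model(ids, max_length=8, padding="max_length", truncation=True)
    #   enc["input_ids"]  # padded/truncated ids with special tokens added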
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
stride: int = 0,
) -> Tuple[List[int], List[int], List[int]]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
):
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
if self.truncation_side == "left":
overflowing_tokens = ids[:window_len]
ids = ids[num_tokens_to_remove:]
elif self.truncation_side == "right":
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
else:
raise ValueError(
f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.")
else:
error_msg = (
f"We need to remove {num_tokens_to_remove} to truncate the input "
f"but the first sequence has a length {len(ids)}. "
)
if truncation_strategy == TruncationStrategy.ONLY_FIRST:
error_msg = (
error_msg + "Please select another truncation strategy than "
f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
)
logger.error(error_msg)
elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
logger.warning(
f"Be aware, overflowing tokens are not returned for the setting you have chosen,"
f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
f"truncation strategy. So the returned list will always be empty even if some "
f"tokens have been removed."
)
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
if self.truncation_side == "right":
ids = ids[:-1]
elif self.truncation_side == "left":
ids = ids[1:]
else:
raise ValueError(
"invalid truncation strategy:" + str(self.truncation_side))
else:
if self.truncation_side == "right":
pair_ids = pair_ids[:-1]
elif self.truncation_side == "left":
pair_ids = pair_ids[1:]
else:
raise ValueError(
"invalid truncation strategy:" + str(self.truncation_side))
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
if self.truncation_side == "right":
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif self.truncation_side == "left":
overflowing_tokens = pair_ids[:window_len]
pair_ids = pair_ids[num_tokens_to_remove:]
else:
raise ValueError(
"invalid truncation strategy:" + str(self.truncation_side))
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input "
f"but the second sequence has a length {len(pair_ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
f"for instance 'longest_first' or 'only_first'."
)
return (ids, pair_ids, overflowing_tokens)
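    # Worked example of the 'only_first' branch with truncation_side == 'right'
    # (hypothetical `tok` instance):
    #   tok.truncate_sequences([1, 2, 3, 4, 5, 6], num_tokens_to_remove=2,
    #                          truncation_strategy="only_first", stride=1)
    #   -> ([1, 2, 3, 4], None, [4, 5, 6])
    # window_len = min(6, 1 + 2) = 3, so the overflow keeps one token of context.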
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) +
1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(
required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [
0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] +
[self.pad_token_type_id] * difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [
1] * difference
encoded_inputs[self.model_input_names[0]
] = required_input + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * \
difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [
1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [
self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" +
str(self.padding_side))
return encoded_inputs
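    # Worked example (right padding, padding_strategy=MAX_LENGTH; pad_token_id=0 and
    # pad_token_type_id=0 are hypothetical values): with required_input=[101, 7, 8, 102],
    # max_length=6 and pad_to_multiple_of=4, max_length is first rounded up to 8, so
    # difference = 4 and the result is
    #   input_ids      -> [101, 7, 8, 102, 0, 0, 0, 0]
    #   attention_mask -> [1, 1, 1, 1, 0, 0, 0, 0]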
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)` but we
often want to remove sub-word tokenization artifacts at the same time.
Args:
tokens (`List[str]`): The tokens to join into a string.
Returns:
`str`: The joined tokens.
"""
raise NotImplementedError
def batch_decode(
self,
sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> List[str]:
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up the tokenization spaces.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`List[str]`: The list of decoded sentences.
"""
return [
self.decode(
seq,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
for seq in sequences
]
def decode(
self,
token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> str:
"""
Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up the tokenization spaces.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
# Convert inputs to python lists
token_ids = to_py_obj(token_ids)
return self._decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def _decode(
self,
token_ids: Union[int, List[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> str:
raise NotImplementedError
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of ids of the first sequence.
token_ids_1 (`List[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
assert already_has_special_tokens and token_ids_1 is None, (
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
"Please use a slow (full python) tokenizer to activate this argument. "
"Or set `return_special_tokens_mask=True` when calling the encoding method "
"to get the special tokens mask in any tokenizer. "
)
all_special_ids = self.all_special_ids # cache the property
special_tokens_mask = [
1 if token in all_special_ids else 0 for token in token_ids_0]
return special_tokens_mask
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
"""
Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
Args:
out_string (`str`): The text to clean up.
Returns:
`str`: The cleaned-up string.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
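    # For example:
    #   PreTrainedTokenizerBase.clean_up_tokenization("do n't stop , please .")
    #   -> "don't stop, please."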
def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
"""
Depending on the input and internal state, we might trigger a warning about a sequence that is too long for its
corresponding model.
Args:
ids (`List[str]`): The ids produced by the tokenization
max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
verbose (`bool`): Whether or not to print more information and warnings.
"""
if max_length is None and len(ids) > self.model_max_length and verbose:
if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
"will result in indexing errors"
)
self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
@contextmanager
def as_target_tokenizer(self):
"""
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
sequence-to-sequence models that need a slightly different processing for the labels.
"""
yield
@classmethod
def register_for_auto_class(cls, auto_class="AutoTokenizer"):
"""
Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the
library are already mapped with `AutoTokenizer`.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
The auto class to register this new tokenizer with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def prepare_seq2seq_batch(
self,
src_texts: List[str],
tgt_texts: Optional[List[str]] = None,
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: str = None,
truncation: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Prepare model inputs for translation. For best performance, translate one sentence at a time.
Arguments:
src_texts (`List[str]`):
List of documents to summarize or source language texts.
tgt_texts (`list`, *optional*):
List of summaries or target language texts.
max_length (`int`, *optional*):
Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
required by one of the truncation/padding parameters. If the model has no specific maximum input length
(like XLNet), truncation/padding to a maximum length will be deactivated.
max_target_length (`int`, *optional*):
Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set
to `None`, this will default to the `max_length` value.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of lists of Python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
**kwargs:
Additional keyword arguments passed along to `self.__call__`.
Return:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to the encoder.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- **labels** -- List of token ids for tgt_texts.
The full set of keys `[input_ids, attention_mask, labels]` will only be returned if `tgt_texts` is passed.
Otherwise, `input_ids` and `attention_mask` will be the only keys.
"""
# docstyle-ignore
formatted_warning = """
`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
`__call__` method to prepare your inputs and the tokenizer under the `as_target_tokenizer` context manager to prepare
your targets.
Here is a short example:
model_inputs = tokenizer(src_texts, ...)
with tokenizer.as_target_tokenizer():
labels = tokenizer(tgt_texts, ...)
model_inputs["labels"] = labels["input_ids"]
See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
For a more complete example, see the implementation of `prepare_seq2seq_batch`.
"""
warnings.warn(formatted_warning, FutureWarning)
# mBART-specific kwargs that should be ignored by other models.
kwargs.pop("src_lang", None)
kwargs.pop("tgt_lang", None)
if max_length is None:
max_length = self.model_max_length
model_inputs = self(
src_texts,
add_special_tokens=True,
return_tensors=return_tensors,
max_length=max_length,
padding=padding,
truncation=truncation,
**kwargs,
)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = max_length
with self.as_target_tokenizer():
labels = self(
tgt_texts,
add_special_tokens=True,
return_tensors=return_tensors,
padding=padding,
max_length=max_target_length,
truncation=truncation,
**kwargs,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
"""
Get the tokenization file to use for this version of transformers.
Args:
tokenization_files (`List[str]`): The list of available tokenizer files.
Returns:
`str`: The tokenization file to use.
"""
tokenizer_files_map = {}
for file_name in tokenization_files:
search = _re_tokenizer_file.search(file_name)
if search is not None:
v = search.groups()[0]
tokenizer_files_map[v] = file_name
available_versions = sorted(tokenizer_files_map.keys())
# Defaults to FULL_TOKENIZER_FILE and then tries to look at some newer versions.
tokenizer_file = FULL_TOKENIZER_FILE
transformers_version = version.parse(__version__)
for v in available_versions:
if version.parse(v) <= transformers_version:
tokenizer_file = tokenizer_files_map[v]
else:
# No point going further since the versions are sorted.
break
return tokenizer_file
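# Illustrative walk-through (file names are hypothetical, assuming `_re_tokenizer_file`
# extracts the version segment from names like "tokenizer.3.0.0.json"): with
#   tokenization_files = ["tokenizer.json", "tokenizer.3.0.0.json", "tokenizer.4.5.0.json"]
# and transformers at version 4.0.0, "tokenizer.3.0.0.json" is the newest versioned file
# whose version is <= 4.0.0, so it is returned; on transformers >= 4.5.0 the 4.5.0 file wins.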
# To update the docstring, we need to copy the method, otherwise we change the original docstring.
PreTrainedTokenizerBase.push_to_hub = copy_func(
PreTrainedTokenizerBase.push_to_hub)
PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
)
|
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the executables produced by gitian only contain
certain symbols and are only linked against allowed libraries.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import sys
import os
from typing import List, Optional
import lief
import pixie
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial&section=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial&section=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': (2,17),
'LIBATOMIC': (1,0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# garliccoind and garliccoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld64.so.1', # POWER64 ABIv1 dynamic linker
'ld64.so.2', # POWER64 ABIv2 dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# garliccoin-qt only
'libxcb.so.1', # part of X11
'libxkbcommon.so.0', # keyboard keymapping
'libxkbcommon-x11.so.0', # keyboard keymapping
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
ARCH_MIN_GLIBC_VER = {
pixie.EM_386: (2,1),
pixie.EM_X86_64: (2,2,5),
pixie.EM_ARM: (2,4),
pixie.EM_AARCH64:(2,17),
pixie.EM_PPC64: (2,17),
pixie.EM_RISCV: (2,27)
}
MACHO_ALLOWED_LIBRARIES = {
# garliccoind and garliccoin-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# garliccoin-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'CoreVideo', # video processing
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'IOSurface', # cross process image/drawing buffers
'libobjc.A.dylib', # Objective-C runtime library
'Metal', # 3D graphics
'Security', # access control and authentication
'QuartzCore', # animation
}
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# garliccoin-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'NETAPI32.dll',
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'USERENV.dll',
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
'WTSAPI32.dll',
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def check_version(max_versions, version, arch) -> bool:
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if lib not in max_versions:
    return False
return ver <= max_versions[lib] or (lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch])
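# For example (illustrative):
#   check_version(MAX_VERSIONS, 'GLIBC_2.17', pixie.EM_X86_64)  -> True,  (2, 17) <= (2, 17)
#   check_version(MAX_VERSIONS, 'GLIBC_2.30', pixie.EM_X86_64)  -> False, version too new
#   check_version(MAX_VERSIONS, 'GCC_4.8.0',  pixie.EM_X86_64)  -> True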
def check_imported_symbols(filename) -> bool:
elf = pixie.load(filename)
cppfilt = CPPFilt()
ok: bool = True
for symbol in elf.dyn_symbols:
if not symbol.is_import:
continue
sym = symbol.name.decode()
version = symbol.version.decode() if symbol.version is not None else None
if version and not check_version(MAX_VERSIONS, version, elf.hdr.e_machine):
print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
ok = False
return ok
def check_exported_symbols(filename) -> bool:
elf = pixie.load(filename)
cppfilt = CPPFilt()
ok: bool = True
for symbol in elf.dyn_symbols:
if not symbol.is_export:
continue
sym = symbol.name.decode()
if elf.hdr.e_machine == pixie.EM_RISCV or sym in IGNORE_EXPORTS:
continue
print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
ok = False
return ok
def check_ELF_libraries(filename) -> bool:
ok: bool = True
elf = pixie.load(filename)
for library_name in elf.query_dyn_tags(pixie.DT_NEEDED):
assert(isinstance(library_name, bytes))
if library_name.decode() not in ELF_ALLOWED_LIBRARIES:
print('{}: NEEDED library {} is not allowed'.format(filename, library_name.decode()))
ok = False
return ok
def check_MACHO_libraries(filename) -> bool:
ok: bool = True
binary = lief.parse(filename)
for dylib in binary.libraries:
split = dylib.name.split('/')
if split[-1] not in MACHO_ALLOWED_LIBRARIES:
print(f'{split[-1]} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
def check_PE_libraries(filename) -> bool:
ok: bool = True
binary = lief.parse(filename)
for dylib in binary.libraries:
if dylib not in PE_ALLOWED_LIBRARIES:
print(f'{dylib} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
('LIBRARY_DEPENDENCIES', check_ELF_libraries)
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries)
],
'PE' : [
('DYNAMIC_LIBRARIES', check_PE_libraries)
]
}
def identify_executable(executable) -> Optional[str]:
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
elif magic.startswith(b'\xcf\xfa'):
return 'MACHO'
return None
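# The magic numbers above: b'MZ' marks a Windows PE, b'\x7fELF' an ELF binary, and
# b'\xcf\xfa' are the leading bytes of the little-endian 64-bit Mach-O magic (0xfeedfacf).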
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print(f'{filename}: unknown format')
retval = 1
continue
failed: List[str] = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
retval = 1
except IOError:
print(f'{filename}: cannot open')
retval = 1
sys.exit(retval)
| #!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the executables produced by gitian only contain
certain symbols and are only linked against allowed libraries.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import sys
import os
from typing import List, Optional
import lief
import pixie
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial&section=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial&section=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': (2,17),
'LIBATOMIC': (1,0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# garliccoind and garliccoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld64.so.1', # POWER64 ABIv1 dynamic linker
'ld64.so.2', # POWER64 ABIv2 dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# garliccoin-qt only
'libxcb.so.1', # part of X11
'libxkbcommon.so.0', # keyboard keymapping
'libxkbcommon-x11.so.0', # keyboard keymapping
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
ARCH_MIN_GLIBC_VER = {
pixie.EM_386: (2,1),
pixie.EM_X86_64: (2,2,5),
pixie.EM_ARM: (2,4),
pixie.EM_AARCH64:(2,17),
pixie.EM_PPC64: (2,17),
pixie.EM_RISCV: (2,27)
}
MACHO_ALLOWED_LIBRARIES = {
# garliccoind and garliccoin-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# garliccoin-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'CoreVideo', # video processing
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'IOSurface', # cross process image/drawing buffers
'libobjc.A.dylib', # Objective-C runtime library
'Metal', # 3D graphics
'Security', # access control and authentication
'QuartzCore', # animation
}
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# garliccoin-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'NETAPI32.dll',
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'USERENV.dll',
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
'WTSAPI32.dll',
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def check_version(max_versions, version, arch) -> bool:
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch]
def check_imported_symbols(filename) -> bool:
elf = pixie.load(filename)
cppfilt = CPPFilt()
ok: bool = True
for symbol in elf.dyn_symbols:
if not symbol.is_import:
continue
sym = symbol.name.decode()
version = symbol.version.decode() if symbol.version is not None else None
if version and not check_version(MAX_VERSIONS, version, elf.hdr.e_machine):
print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
ok = False
return ok
def check_exported_symbols(filename) -> bool:
elf = pixie.load(filename)
cppfilt = CPPFilt()
ok: bool = True
for symbol in elf.dyn_symbols:
if not symbol.is_export:
continue
sym = symbol.name.decode()
if elf.hdr.e_machine == pixie.EM_RISCV or sym in IGNORE_EXPORTS:
continue
print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
ok = False
return ok
def check_ELF_libraries(filename) -> bool:
ok: bool = True
elf = pixie.load(filename)
for library_name in elf.query_dyn_tags(pixie.DT_NEEDED):
assert(isinstance(library_name, bytes))
if library_name.decode() not in ELF_ALLOWED_LIBRARIES:
print('{}: NEEDED library {} is not allowed'.format(filename, library_name.decode()))
ok = False
return ok
def check_MACHO_libraries(filename) -> bool:
ok: bool = True
binary = lief.parse(filename)
for dylib in binary.libraries:
split = dylib.name.split('/')
if split[-1] not in MACHO_ALLOWED_LIBRARIES:
print(f'{split[-1]} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
def check_PE_libraries(filename) -> bool:
ok: bool = True
binary = lief.parse(filename)
for dylib in binary.libraries:
if dylib not in PE_ALLOWED_LIBRARIES:
print(f'{dylib} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
('LIBRARY_DEPENDENCIES', check_ELF_libraries)
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries)
],
'PE' : [
('DYNAMIC_LIBRARIES', check_PE_libraries)
]
}
def identify_executable(executable) -> Optional[str]:
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
elif magic.startswith(b'\xcf\xfa'):
return 'MACHO'
return None
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print(f'{filename}: unknown format')
retval = 1
continue
failed: List[str] = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
retval = 1
except IOError:
print(f'{filename}: cannot open')
retval = 1
sys.exit(retval)
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Common modules
"""
import logging
import math
import warnings
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from utils.datasets import exif_transpose, letterbox
from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \
scale_coords, xyxy2xywh
from utils.plots import Annotator, colors
from utils.torch_utils import time_sync
LOGGER = logging.getLogger(__name__)
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
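# For example: autopad(3) -> 1 and autopad(5) -> 2 give 'same' output sizes for
# stride-1 convs; autopad((3, 5)) -> [1, 2] handles per-dimension kernel sizes.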
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def forward_fuse(self, x):
return self.act(self.conv(x))
class DWConv(Conv):
# Depth-wise convolution class
def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3)
return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h)
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class C3SPP(C3):
# C3 module with SPP()
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = SPP(c_, c_, k)
class C3Ghost(C3):
# C3 module with GhostBottleneck()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])
class SPP(nn.Module):
# Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
def __init__(self, c1, c2, k=(5, 9, 13)):
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPF(nn.Module):
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))
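    # Shape sketch (illustrative): applying the same k=5 pool three times in sequence yields
    # effective receptive fields of 5, 9 and 13, which is why SPPF(k=5) matches SPP(k=(5, 9, 13)):
    #   m = SPPF(64, 128)
    #   m(torch.zeros(1, 64, 32, 32)).shape  # -> torch.Size([1, 128, 32, 32])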
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
# self.contract = Contract(gain=2)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
# return self.conv(self.contract(x))
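    # Worked shapes (illustrative): for x of shape (1, 3, 640, 640) the four strided slices
    # are each (1, 3, 320, 320); concatenated -> (1, 12, 320, 320), which is why the conv
    # above takes c1 * 4 input channels.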
class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
super().__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
def forward(self, x):
y = self.cv1(x)
return torch.cat([y, self.cv2(y)], 1)
class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super().__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
def forward(self, x):
return self.conv(x) + self.shortcut(x)
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
b, c, h, w = x.size() # assert (h % s == 0) and (w % s == 0), 'Indivisible gain'
s = self.gain
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
b, c, h, w = x.size() # assert c % s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super().__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class AutoShape(nn.Module):
# YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
multi_label = False # NMS multiple labels per box
max_det = 1000 # maximum number of detections per image
def __init__(self, model):
super().__init__()
self.model = model.eval()
def autoshape(self):
LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
def _apply(self, fn):
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
self = super()._apply(fn)
m = self.model.model[-1] # Detect()
m.stride = fn(m.stride)
m.grid = list(map(fn, m.grid))
if isinstance(m.anchor_grid, list):
m.anchor_grid = list(map(fn, m.anchor_grid))
return self
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# file: imgs = 'data/images/zidane.jpg' # str or PosixPath
# URI: = 'https://ultralytics.com/images/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
t = [time_sync()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, (str, Path)): # filename or uri
im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
im = np.asarray(exif_transpose(im))
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_sync())
with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_sync())
# Post-process
y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes,
multi_label=self.multi_label, max_det=self.max_det) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
t.append(time_sync())
return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# YOLOv5 detections class for inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super().__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
        self.times = times  # raw timestamps, reused by tolist()
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times else None  # speeds (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
crops = []
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
if pred.shape[0]:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
s += f"{n} {self.names[int(c)]}{"s" * (n > 1)}, " # add to string
if show or save or render or crop:
annotator = Annotator(im, example=str(self.names))
for *box, conf, cls in reversed(pred): # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
if crop:
file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label,
'im': save_one_box(box, im, file=file, save=save)})
else: # all others
annotator.box_label(box, label, color=colors(cls))
im = annotator.im
else:
s += '(no detections)'
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
LOGGER.info(s.rstrip(', '))
if show:
im.show(self.files[i]) # show
if save:
f = self.files[i]
im.save(save_dir / f) # save
if i == self.n - 1:
LOGGER.info(f"Saved {self.n} image{"s" * (self.n > 1)} to {colorstr("bold", save_dir)}")
if render:
self.imgs[i] = np.asarray(im)
if crop:
if save:
LOGGER.info(f'Saved results to {save_dir}\n')
return crops
def print(self):
self.display(pprint=True) # print results
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %
self.t)
def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/detect/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
def crop(self, save=True, save_dir='runs/detect/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None
return self.display(crop=True, save=save, save_dir=save_dir) # crop results
def render(self):
self.display(render=True) # render results
return self.imgs
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s)
             for i in range(self.n)]  # a proper single-image Detections per input
return x
def __len__(self):
return self.n
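# --- Usage sketch (editor's addition): consuming a Detections object ---
# Assuming `results = model(imgs)` returned the Detections built above:
#   df = results.pandas().xyxy[0]      # DataFrame: xmin..ymax, confidence, class, name
#   confident = df[df['confidence'] > 0.5]
#   for r in results.tolist():         # one single-image Detections per input
#       r.print()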
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2)
| # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Common modules
"""
import logging
import math
import warnings
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from utils.datasets import exif_transpose, letterbox
from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \
scale_coords, xyxy2xywh
from utils.plots import Annotator, colors
from utils.torch_utils import time_sync
LOGGER = logging.getLogger(__name__)
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
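# --- Worked examples (editor's addition) ---
#   autopad(3)      -> 1        # 3x3 kernel: padding 1 keeps H and W at stride 1
#   autopad((1, 5)) -> [0, 2]   # per-dimension kernels get per-dimension padding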
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def forward_fuse(self, x):
return self.act(self.conv(x))
class DWConv(Conv):
# Depth-wise convolution class
def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3)
return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h)
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class C3SPP(C3):
# C3 module with SPP()
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = SPP(c_, c_, k)
class C3Ghost(C3):
# C3 module with GhostBottleneck()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])
class SPP(nn.Module):
# Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
def __init__(self, c1, c2, k=(5, 9, 13)):
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPF(nn.Module):
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))
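# --- Equivalence check (editor's addition) ---
# SPPF(k=5) matches SPP(k=(5, 9, 13)) because two chained 5x5 max-pools have a
# 9x9 receptive field and three have a 13x13 one; a quick numerical sanity check:
#   import torch
#   import torch.nn as nn
#   x = torch.randn(1, 8, 32, 32)
#   m5, m9, m13 = nn.MaxPool2d(5, 1, 2), nn.MaxPool2d(9, 1, 4), nn.MaxPool2d(13, 1, 6)
#   assert torch.equal(m5(m5(x)), m9(x))
#   assert torch.equal(m5(m5(m5(x))), m13(x))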
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
# self.contract = Contract(gain=2)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
# return self.conv(self.contract(x))
class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
super().__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
def forward(self, x):
y = self.cv1(x)
return torch.cat([y, self.cv2(y)], 1)
class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super().__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
def forward(self, x):
return self.conv(x) + self.shortcut(x)
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        b, c, h, w = x.size()  # assert h % s == 0 and w % s == 0, 'Indivisible gain'
s = self.gain
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        b, c, h, w = x.size()  # assert c % s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
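# --- Round-trip check (editor's addition) ---
# Contract and Expand with the same gain are exact inverses:
#   import torch
#   x = torch.randn(1, 64, 80, 80)
#   y = Contract(gain=2)(x)   # -> (1, 256, 40, 40)
#   z = Expand(gain=2)(y)     # -> (1, 64, 80, 80)
#   assert torch.equal(z, x)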
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super().__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class AutoShape(nn.Module):
# YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
multi_label = False # NMS multiple labels per box
max_det = 1000 # maximum number of detections per image
def __init__(self, model):
super().__init__()
self.model = model.eval()
def autoshape(self):
LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
def _apply(self, fn):
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
self = super()._apply(fn)
m = self.model.model[-1] # Detect()
m.stride = fn(m.stride)
m.grid = list(map(fn, m.grid))
if isinstance(m.anchor_grid, list):
m.anchor_grid = list(map(fn, m.anchor_grid))
return self
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# file: imgs = 'data/images/zidane.jpg' # str or PosixPath
# URI: = 'https://ultralytics.com/images/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
t = [time_sync()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, (str, Path)): # filename or uri
im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
im = np.asarray(exif_transpose(im))
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_sync())
with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_sync())
# Post-process
y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes,
multi_label=self.multi_label, max_det=self.max_det) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
t.append(time_sync())
return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# YOLOv5 detections class for inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super().__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
        self.times = times  # raw timestamps, reused by tolist()
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times else None  # speeds (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
crops = []
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
if pred.shape[0]:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render or crop:
annotator = Annotator(im, example=str(self.names))
for *box, conf, cls in reversed(pred): # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
if crop:
file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label,
'im': save_one_box(box, im, file=file, save=save)})
else: # all others
annotator.box_label(box, label, color=colors(cls))
im = annotator.im
else:
s += '(no detections)'
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
LOGGER.info(s.rstrip(', '))
if show:
im.show(self.files[i]) # show
if save:
f = self.files[i]
im.save(save_dir / f) # save
if i == self.n - 1:
LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
if render:
self.imgs[i] = np.asarray(im)
if crop:
if save:
LOGGER.info(f'Saved results to {save_dir}\n')
return crops
def print(self):
self.display(pprint=True) # print results
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %
self.t)
def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/detect/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
def crop(self, save=True, save_dir='runs/detect/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None
return self.display(crop=True, save=save, save_dir=save_dir) # crop results
def render(self):
self.display(render=True) # render results
return self.imgs
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s)
             for i in range(self.n)]  # a proper single-image Detections per input
return x
def __len__(self):
return self.n
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2)
|
#!/usr/bin/env python
from __future__ import print_function
import utils
from accesslink import AccessLink
from datetime import datetime
try:
input = raw_input
except NameError:
pass
CONFIG_FILENAME = 'config.yml'
class PolarAccessLinkExample(object):
"""Example application for Polar Open AccessLink v3."""
def __init__(self):
self.config = utils.load_config(CONFIG_FILENAME)
if 'access_token' not in self.config:
print('Authorization is required. Run authorization.py first.')
return
self.accesslink = AccessLink(client_id=self.config['client_id'],
client_secret=self.config['client_secret'])
self.running = True
self.show_menu()
def show_menu(self):
while self.running:
print('\nChoose an option:\n' +
'-----------------------\n' +
' 1 => Get data\n' +
' 2 => Revoke access token\n' +
'-1 => Exit\n' +
'-----------------------')
self.get_menu_choice()
def get_menu_choice(self):
choice = input('> ')
{
'1': self.get_all_data,
# '1': self.get_user_information,
# '2': self.check_available_data,
'2': self.revoke_access_token,
'-1': self.exit
}.get(choice, self.get_menu_choice)()
def get_all_data(self):
self.get_user_information()
self.check_available_data()
def get_user_information(self):
user_info = self.accesslink.users.get_information(user_id=self.config['user_id'],
access_token=self.config['access_token'])
print('==========\tUSER INFORMATION\t==========')
utils.pretty_print_json(user_info)
        utils.save_json_to_file(user_info, f'user_data/user_data_{datetime.today().strftime("%Y-%m-%d")}.json')
def check_available_data(self):
available_data = self.accesslink.pull_notifications.list()
print('==========\tDATA\t==========')
if not available_data:
print('No new data available.')
return
print('Available data:')
utils.pretty_print_json(available_data)
for item in available_data['available-user-data']:
if item['data-type'] == 'EXERCISE':
self.get_exercises()
elif item['data-type'] == 'ACTIVITY_SUMMARY':
self.get_daily_activity()
elif item['data-type'] == 'PHYSICAL_INFORMATION':
self.get_physical_info()
def revoke_access_token(self):
self.accesslink.users.delete(user_id=self.config['user_id'],
access_token=self.config['access_token'])
del self.config['access_token']
del self.config['user_id']
utils.save_config(self.config, CONFIG_FILENAME)
print('Access token was successfully revoked.')
self.exit()
def exit(self):
self.running = False
def get_exercises(self):
transaction = self.accesslink.training_data.create_transaction(user_id=self.config['user_id'],
access_token=self.config['access_token'])
if not transaction:
print('No new exercises available.')
return
resource_urls = transaction.list_exercises()['exercises']
for url in resource_urls:
exercise_summary = transaction.get_exercise_summary(url)
gpx_data = transaction.get_gpx(url)
tcx_data = transaction.get_tcx(url)
hr_data = transaction.get_heart_rate_zones(url)
samples_data = transaction.get_available_samples(url)
sample_data = transaction.get_samples(url)
print('Exercise summary:')
utils.pretty_print_json(exercise_summary)
time = utils.polar_datetime_to_python_datetime_str(str(exercise_summary['start-time']))
utils.save_json_to_file(exercise_summary, f'exercises_data/summary_data_{time}.json')
if gpx_data: # not empty dict. If there is no data, this variable will have '{}' value
utils.save_json_to_file(utils.xml_to_dict(gpx_data), f'exercises_data/gpx_data_{time}.json')
if tcx_data:
utils.save_json_to_file(utils.xml_to_dict(tcx_data), f'exercises_data/tcx_data_{time}.json')
if hr_data:
utils.save_json_to_file(hr_data, f'exercises_data/hr_data_{time}.json')
if samples_data:
utils.save_json_to_file(samples_data, f'exercises_data/samples_data_{time}.json')
if sample_data:
utils.save_json_to_file(sample_data, f'exercises_data/sample_data_{time}.json')
transaction.commit()
def get_daily_activity(self):
transaction = self.accesslink.daily_activity.create_transaction(user_id=self.config['user_id'],
access_token=self.config['access_token'])
if not transaction:
print('No new daily activity available.')
return
resource_urls = transaction.list_activities()['activity-log']
for url in resource_urls:
activity_summary = transaction.get_activity_summary(url)
print('Activity summary:')
utils.pretty_print_json(activity_summary)
            utils.save_json_to_file(activity_summary, f'daily_activity_data/daily_activity_data_{str(activity_summary["date"])}.json')
transaction.commit()
def get_physical_info(self):
transaction = self.accesslink.physical_info.create_transaction(user_id=self.config['user_id'],
access_token=self.config['access_token'])
if not transaction:
print('No new physical information available.')
return
resource_urls = transaction.list_physical_infos()['physical-informations']
for url in resource_urls:
physical_info = transaction.get_physical_info(url)
print('Physical info:')
utils.pretty_print_json(physical_info)
time = utils.polar_datetime_to_python_datetime_str(str(physical_info['created']))
            utils.save_json_to_file(physical_info, f'physical_data/physical_data_{time}.json')
transaction.commit()
if __name__ == '__main__':
PolarAccessLinkExample()
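# --- Editor's note: assumed config.yml layout ---
# The keys read above imply a config file of roughly this shape (values are
# placeholders; client_id/client_secret come from the Polar developer console,
# access_token/user_id are written by authorization.py):
#   client_id: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
#   client_secret: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
#   access_token: "..."
#   user_id: 12345678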
| #!/usr/bin/env python
from __future__ import print_function
import utils
from accesslink import AccessLink
from datetime import datetime
try:
input = raw_input
except NameError:
pass
CONFIG_FILENAME = 'config.yml'
class PolarAccessLinkExample(object):
"""Example application for Polar Open AccessLink v3."""
def __init__(self):
self.config = utils.load_config(CONFIG_FILENAME)
if 'access_token' not in self.config:
print('Authorization is required. Run authorization.py first.')
return
self.accesslink = AccessLink(client_id=self.config['client_id'],
client_secret=self.config['client_secret'])
self.running = True
self.show_menu()
def show_menu(self):
while self.running:
print('\nChoose an option:\n' +
'-----------------------\n' +
' 1 => Get data\n' +
' 2 => Revoke access token\n' +
'-1 => Exit\n' +
'-----------------------')
self.get_menu_choice()
def get_menu_choice(self):
choice = input('> ')
{
'1': self.get_all_data,
# '1': self.get_user_information,
# '2': self.check_available_data,
'2': self.revoke_access_token,
'-1': self.exit
}.get(choice, self.get_menu_choice)()
def get_all_data(self):
self.get_user_information()
self.check_available_data()
def get_user_information(self):
user_info = self.accesslink.users.get_information(user_id=self.config['user_id'],
access_token=self.config['access_token'])
print('==========\tUSER INFORMATION\t==========')
utils.pretty_print_json(user_info)
utils.save_json_to_file(user_info, f'user_data/user_data_{datetime.today().strftime("%Y-%m-%d")}.json')
def check_available_data(self):
available_data = self.accesslink.pull_notifications.list()
print('==========\tDATA\t==========')
if not available_data:
print('No new data available.')
return
print('Available data:')
utils.pretty_print_json(available_data)
for item in available_data['available-user-data']:
if item['data-type'] == 'EXERCISE':
self.get_exercises()
elif item['data-type'] == 'ACTIVITY_SUMMARY':
self.get_daily_activity()
elif item['data-type'] == 'PHYSICAL_INFORMATION':
self.get_physical_info()
def revoke_access_token(self):
self.accesslink.users.delete(user_id=self.config['user_id'],
access_token=self.config['access_token'])
del self.config['access_token']
del self.config['user_id']
utils.save_config(self.config, CONFIG_FILENAME)
print('Access token was successfully revoked.')
self.exit()
def exit(self):
self.running = False
def get_exercises(self):
transaction = self.accesslink.training_data.create_transaction(user_id=self.config['user_id'],
access_token=self.config['access_token'])
if not transaction:
print('No new exercises available.')
return
resource_urls = transaction.list_exercises()['exercises']
for url in resource_urls:
exercise_summary = transaction.get_exercise_summary(url)
gpx_data = transaction.get_gpx(url)
tcx_data = transaction.get_tcx(url)
hr_data = transaction.get_heart_rate_zones(url)
samples_data = transaction.get_available_samples(url)
sample_data = transaction.get_samples(url)
print('Exercise summary:')
utils.pretty_print_json(exercise_summary)
time = utils.polar_datetime_to_python_datetime_str(str(exercise_summary['start-time']))
utils.save_json_to_file(exercise_summary, f'exercises_data/summary_data_{time}.json')
if gpx_data: # not empty dict. If there is no data, this variable will have '{}' value
utils.save_json_to_file(utils.xml_to_dict(gpx_data), f'exercises_data/gpx_data_{time}.json')
if tcx_data:
utils.save_json_to_file(utils.xml_to_dict(tcx_data), f'exercises_data/tcx_data_{time}.json')
if hr_data:
utils.save_json_to_file(hr_data, f'exercises_data/hr_data_{time}.json')
if samples_data:
utils.save_json_to_file(samples_data, f'exercises_data/samples_data_{time}.json')
if sample_data:
utils.save_json_to_file(sample_data, f'exercises_data/sample_data_{time}.json')
transaction.commit()
def get_daily_activity(self):
transaction = self.accesslink.daily_activity.create_transaction(user_id=self.config['user_id'],
access_token=self.config['access_token'])
if not transaction:
print('No new daily activity available.')
return
resource_urls = transaction.list_activities()['activity-log']
for url in resource_urls:
activity_summary = transaction.get_activity_summary(url)
print('Activity summary:')
utils.pretty_print_json(activity_summary)
utils.save_json_to_file(activity_summary, f'daily_activity_data/daily_activity_data_{str(activity_summary["date"])}.json')
transaction.commit()
def get_physical_info(self):
transaction = self.accesslink.physical_info.create_transaction(user_id=self.config['user_id'],
access_token=self.config['access_token'])
if not transaction:
print('No new physical information available.')
return
resource_urls = transaction.list_physical_infos()['physical-informations']
for url in resource_urls:
physical_info = transaction.get_physical_info(url)
print('Physical info:')
utils.pretty_print_json(physical_info)
time = utils.polar_datetime_to_python_datetime_str(str(physical_info['created']))
            utils.save_json_to_file(physical_info, f'physical_data/physical_data_{time}.json')
transaction.commit()
if __name__ == '__main__':
PolarAccessLinkExample()
|
# Copyright 2021 EMQ Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import traceback
from typing import Any
from . import reg
from .connection import PairChannel
from .contextimpl import ContextImpl
from .symbol import SymbolRuntime
from ..function import Function
class FunctionRuntime(SymbolRuntime):
def __init__(self, ctrl: dict, s: Function):
ch = PairChannel(ctrl['symbolName'], 1)
self.s = s
self.ch = ch
self.running = False
self.key = "func_{}".format(ctrl['symbolName'])
self.funcs = {}
def run(self):
self.running = True
reg.setr(self.key, self)
# noinspection PyBroadException
try:
self.ch.run(self.do_run)
except Exception:
if self.running:
logging.error(traceback.format_exc())
finally:
self.stop()
def do_run(self, req: bytes):
# noinspection PyBroadException
try:
c = json.loads(req)
logging.debug("running func with ", c)
name = c['func']
if name == "Validate":
err = self.s.validate(c['arg'])
if err != "":
return encode_reply(False, err)
else:
return encode_reply(True, "")
elif name == "Exec":
args = c['arg']
if isinstance(args, list) is False or len(args) < 1:
return encode_reply(False, 'invalid arg')
fmeta = json.loads(args[-1])
if 'ruleId' in fmeta and 'opId' in fmeta and 'instanceId' in fmeta \
and 'funcId' in fmeta:
key = f"{fmeta["ruleId"]}_{fmeta["opId"]}_{fmeta["instanceId"]}" \
f"_{fmeta["funcId"]}"
if key in self.funcs:
fctx = self.funcs[key]
else:
fctx = ContextImpl(fmeta)
self.funcs[key] = fctx
else:
return encode_reply(False,
f'invalid arg: {fmeta} ruleId, opId, instanceId and funcId'
f' are required')
r = self.s.exec(args[:-1], fctx)
return encode_reply(True, r)
elif name == "IsAggregate":
r = self.s.is_aggregate()
return encode_reply(True, r)
else:
return encode_reply(False, "invalid func {}".format(name))
except Exception:
"""two occasions: normal stop will close socket to raise an error
OR stopped by unexpected error"""
if self.running:
logging.error(traceback.format_exc())
return encode_reply(False, traceback.format_exc())
def stop(self):
self.running = False
# noinspection PyBroadException
try:
self.ch.close()
reg.delete(self.key)
except Exception:
logging.error(traceback.format_exc())
def is_running(self) -> bool:
return self.running
def encode_reply(state: bool, arg: Any):
try:
return str.encode(json.dumps({'state': state, 'result': arg}))
except Exception:
return str.encode(json.dumps({'state': False, 'result': traceback.format_exc()}))
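# --- Protocol sketch (editor's addition) ---
# do_run() consumes a JSON request naming the plugin function; a hypothetical
# "Exec" round-trip, where the last arg carries the caller metadata, looks like:
#   req = json.dumps({
#       'func': 'Exec',
#       'arg': ['hello', json.dumps({'ruleId': 'r1', 'opId': 'op1',
#                                    'instanceId': 0, 'funcId': 1})],
#   }).encode()
#   reply = runtime.do_run(req)  # -> b'{"state": true, "result": ...}'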
| # Copyright 2021 EMQ Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import traceback
from typing import Any
from . import reg
from .connection import PairChannel
from .contextimpl import ContextImpl
from .symbol import SymbolRuntime
from ..function import Function
class FunctionRuntime(SymbolRuntime):
def __init__(self, ctrl: dict, s: Function):
ch = PairChannel(ctrl['symbolName'], 1)
self.s = s
self.ch = ch
self.running = False
self.key = "func_{}".format(ctrl['symbolName'])
self.funcs = {}
def run(self):
self.running = True
reg.setr(self.key, self)
# noinspection PyBroadException
try:
self.ch.run(self.do_run)
except Exception:
if self.running:
logging.error(traceback.format_exc())
finally:
self.stop()
def do_run(self, req: bytes):
# noinspection PyBroadException
try:
c = json.loads(req)
logging.debug("running func with ", c)
name = c['func']
if name == "Validate":
err = self.s.validate(c['arg'])
if err != "":
return encode_reply(False, err)
else:
return encode_reply(True, "")
elif name == "Exec":
args = c['arg']
if isinstance(args, list) is False or len(args) < 1:
return encode_reply(False, 'invalid arg')
fmeta = json.loads(args[-1])
if 'ruleId' in fmeta and 'opId' in fmeta and 'instanceId' in fmeta \
and 'funcId' in fmeta:
key = f"{fmeta['ruleId']}_{fmeta['opId']}_{fmeta['instanceId']}" \
f"_{fmeta['funcId']}"
if key in self.funcs:
fctx = self.funcs[key]
else:
fctx = ContextImpl(fmeta)
self.funcs[key] = fctx
else:
return encode_reply(False,
f'invalid arg: {fmeta} ruleId, opId, instanceId and funcId'
f' are required')
r = self.s.exec(args[:-1], fctx)
return encode_reply(True, r)
elif name == "IsAggregate":
r = self.s.is_aggregate()
return encode_reply(True, r)
else:
return encode_reply(False, "invalid func {}".format(name))
except Exception:
"""two occasions: normal stop will close socket to raise an error
OR stopped by unexpected error"""
if self.running:
logging.error(traceback.format_exc())
return encode_reply(False, traceback.format_exc())
def stop(self):
self.running = False
# noinspection PyBroadException
try:
self.ch.close()
reg.delete(self.key)
except Exception:
logging.error(traceback.format_exc())
def is_running(self) -> bool:
return self.running
def encode_reply(state: bool, arg: Any):
try:
return str.encode(json.dumps({'state': state, 'result': arg}))
except Exception:
return str.encode(json.dumps({'state': False, 'result': traceback.format_exc()}))
|
import subprocess
import click
import scphylo as scp
from scphylo.ul._servers import cmd, write_cmds_get_main
@click.command(short_help="Run MuTect2.")
@click.argument(
"outdir",
required=True,
type=click.Path(
exists=True, file_okay=False, dir_okay=True, readable=True, resolve_path=True
),
)
@click.argument(
"normal",
required=True,
type=str,
)
@click.argument(
"ref",
required=True,
type=click.Choice(scp.settings.refs),
)
@click.option(
"--time",
default="0-10:00:00",
type=str,
show_default=True,
help="Time.",
)
@click.option(
"--mem",
default="50",
type=str,
show_default=True,
help="Memory.",
)
@click.option(
"--afterok",
default=None,
type=str,
show_default=True,
help="Afterok.",
)
def mutect2(outdir, normal, ref, time, mem, afterok):
"""Run MuTect2.
scphylo mutect2 /path/to/in/dir normal_name hg19|hg38|mm10
BAM files (*.markdup_bqsr.bam) --> VCF files (*.mutect2.vcf)
"""
if ref == "hg19":
config = scp.settings.hg19
elif ref == "mm10":
config = scp.settings.mm10
elif ref == "hg38":
config = scp.settings.hg38
def get_command(sample):
cmds = ""
cmds += cmd([f"module load {scp.settings.tools["gatk"]}"])
cmds += cmd(
[
f'gatk --java-options "-Xmx{int(mem)-10}g"',
"Mutect2",
f"--reference {config["ref"]}",
f"--input {outdir}/{sample}.markdup_bqsr.bam",
f"--input {outdir}/{normal}.markdup_bqsr.bam",
f"--normal-sample {normal}",
f"--output {outdir}/{sample}.mutect2.vcf",
]
)
cmds += cmd(
[
"rm -rf",
f"{outdir}/{sample}.mutect2.vcf.stats",
f"{outdir}/{sample}.mutect2.vcf.idx",
]
)
cmds += cmd(["echo Done!"], islast=True)
return cmds
df_cmds = scp.ul.get_samples_df(outdir, normal)
df_cmds["cmd"] = df_cmds.apply(lambda x: get_command(x["sample"]), axis=1)
cmdmain = write_cmds_get_main(
df_cmds,
"mutect2",
time,
mem,
None,
1,
scp.settings.tools["email"],
f"{outdir}/_tmp",
afterok,
)
code = subprocess.getoutput(cmdmain)
scp.logg.info(code)
return None
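# --- Editor's note: shape of the generated per-sample script ---
# For a hypothetical sample "S1" with normal "N1", ref hg38 and the default
# mem of 50, get_command() emits roughly (paths abbreviated):
#   module load <gatk module>
#   gatk --java-options "-Xmx40g" Mutect2 --reference <hg38 ref> \
#       --input <outdir>/S1.markdup_bqsr.bam --input <outdir>/N1.markdup_bqsr.bam \
#       --normal-sample N1 --output <outdir>/S1.mutect2.vcf
#   rm -rf <outdir>/S1.mutect2.vcf.stats <outdir>/S1.mutect2.vcf.idx
#   echo Done!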
| import subprocess
import click
import scphylo as scp
from scphylo.ul._servers import cmd, write_cmds_get_main
@click.command(short_help="Run MuTect2.")
@click.argument(
"outdir",
required=True,
type=click.Path(
exists=True, file_okay=False, dir_okay=True, readable=True, resolve_path=True
),
)
@click.argument(
"normal",
required=True,
type=str,
)
@click.argument(
"ref",
required=True,
type=click.Choice(scp.settings.refs),
)
@click.option(
"--time",
default="0-10:00:00",
type=str,
show_default=True,
help="Time.",
)
@click.option(
"--mem",
default="50",
type=str,
show_default=True,
help="Memory.",
)
@click.option(
"--afterok",
default=None,
type=str,
show_default=True,
help="Afterok.",
)
def mutect2(outdir, normal, ref, time, mem, afterok):
"""Run MuTect2.
scphylo mutect2 /path/to/in/dir normal_name hg19|hg38|mm10
BAM files (*.markdup_bqsr.bam) --> VCF files (*.mutect2.vcf)
"""
if ref == "hg19":
config = scp.settings.hg19
elif ref == "mm10":
config = scp.settings.mm10
elif ref == "hg38":
config = scp.settings.hg38
def get_command(sample):
cmds = ""
cmds += cmd([f"module load {scp.settings.tools['gatk']}"])
cmds += cmd(
[
f'gatk --java-options "-Xmx{int(mem)-10}g"',
"Mutect2",
f"--reference {config['ref']}",
f"--input {outdir}/{sample}.markdup_bqsr.bam",
f"--input {outdir}/{normal}.markdup_bqsr.bam",
f"--normal-sample {normal}",
f"--output {outdir}/{sample}.mutect2.vcf",
]
)
cmds += cmd(
[
"rm -rf",
f"{outdir}/{sample}.mutect2.vcf.stats",
f"{outdir}/{sample}.mutect2.vcf.idx",
]
)
cmds += cmd(["echo Done!"], islast=True)
return cmds
df_cmds = scp.ul.get_samples_df(outdir, normal)
df_cmds["cmd"] = df_cmds.apply(lambda x: get_command(x["sample"]), axis=1)
cmdmain = write_cmds_get_main(
df_cmds,
"mutect2",
time,
mem,
None,
1,
scp.settings.tools["email"],
f"{outdir}/_tmp",
afterok,
)
code = subprocess.getoutput(cmdmain)
scp.logg.info(code)
return None
|
#!/usr/bin/env python
import csv
import datetime
from argparse import Namespace
from pathlib import Path
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import RunningAverage
from ignite.handlers import EarlyStopping, ModelCheckpoint, Timer
from ignite.contrib.handlers import ProgressBar
from .model import ModelContainer
from .dataset import DataContainer
class IgniteTrainer(object):
def __init__(self, mc: ModelContainer, dc: DataContainer, consts: Namespace, pbar:
ProgressBar, metrics: dict={}) -> None:
        # retrieve required constants from consts
self.model_dir = consts.model_dir
self.metrics_file = consts.metric_file
self.patience = consts.early_stopping_criteria
self.n_epochs = consts.num_epochs
self.device = consts.device
self.prefix = consts.checkpointer_prefix
self.model_name = consts.checkpointer_name
self.save_interval = consts.save_every
self.n_saved = consts.save_total
# get model and data details
self.model = mc.model
self.optimizer = mc.optimizer
self.scheduler = mc.scheduler
self.loss_fn = mc.loss_fn
self.train_dl = dc.train_dl
self.val_dl = dc.val_dl
# create trainers and evaluators
self.trainer = create_supervised_trainer(self.model, self.optimizer, self.loss_fn,
device=self.device)
self.train_eval = create_supervised_evaluator(self.model, metrics=metrics, device=self.device)
self.val_eval = create_supervised_evaluator(self.model, metrics=metrics, device=self.device)
# set loss to be shown in progress bar
self.pbar = pbar
RunningAverage(output_transform=lambda x: x).attach(self.trainer, 'loss')
self.pbar.attach(self.trainer, ['loss'])
# setup timers
self.epoch_timer = Timer(average=True)
self.epoch_timer.attach(self.trainer, start=Events.EPOCH_COMPLETED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
self.training_timer = Timer()
self.training_timer.attach(self.trainer)
# setup early stopping and checkpointer
early_stopping = EarlyStopping(patience=self.patience, score_function=self.score_fn,
trainer=self.trainer)
checkpointer = ModelCheckpoint(self.model_dir, self.prefix, require_empty=False,
save_interval=self.save_interval, n_saved=self.n_saved, save_as_state_dict=True)
# add all the event handlers
self.trainer.add_event_handler(Events.STARTED, self._open_csv)
self.trainer.add_event_handler(Events.EPOCH_COMPLETED, self._log_epoch)
self.trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {self.model_name:
self.model})
self.trainer.add_event_handler(Events.COMPLETED, self._close_csv)
# self.trainer.add_event_handler(Events.ITERATION_COMPLETED, self._log_training_loss)
self.val_eval.add_event_handler(Events.COMPLETED, early_stopping)
self.val_eval.add_event_handler(Events.COMPLETED, self._scheduler_step)
def _open_csv(self, engine):
self.fp = open(self.metrics_file, 'w')
self.writer = csv.writer(self.fp)
row = ['epoch', 'training_loss', 'training_acc', 'validation_loss', 'validation_acc']
self.writer.writerow(row)
def _scheduler_step(self, engine):
self.scheduler.step(engine.state.metrics['loss'])
def _log_training_loss(self, engine):
iteration = (engine.state.iteration-1) % len(self.train_dl) + 1
if iteration % 100 == 0:
self.pbar.log_message(f"ITERATION - loss: {engine.state.output:0.4f}")
def _log_epoch(self, engine):
self.epoch_timer.reset()
self.train_eval.run(self.train_dl)
self.val_eval.run(self.val_dl)
epoch = engine.state.epoch
        train_metric = self.train_eval.state.metrics
        valid_metric = self.val_eval.state.metrics
        train_loss = f"{train_metric['loss']:0.3f}"
        train_acc = f"{train_metric['accuracy']:0.3f}"
        valid_loss = f"{valid_metric['loss']:0.3f}"
        valid_acc = f"{valid_metric['accuracy']:0.3f}"
self.pbar.log_message(f"Epoch: {epoch}")
self.pbar.log_message(f"Training - Loss: {train_loss}, Accuracy: {train_acc}")
self.pbar.log_message(f"Validation - Loss: {valid_loss}, Accuracy: {valid_acc}")
self.pbar.log_message(f"Time per batch {self.epoch_timer.value():0.3f}[s]")
row = [epoch, f"{train_loss}", f"{train_acc}", f"{valid_loss}", f"{valid_acc}"]
self.writer.writerow(row)
def _close_csv(self, engine):
train_time = str(datetime.timedelta(seconds=self.training_timer.value()))
self.pbar.log_message(f"Training Done. Total training time: {train_time}")
self.fp.write(f"{train_time}\n")
self.fp.close()
def run(self):
self.trainer.run(self.train_dl, self.n_epochs)
@staticmethod
def score_fn(engine):
valid_loss = engine.state.metrics['loss']
return -valid_loss
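# --- Usage sketch (editor's addition) ---
# IgniteTrainer only reads attributes from its arguments, so wiring it up takes
# a Namespace carrying the constants consumed in __init__ (names mirror that
# code); the metrics dict must provide 'loss' and 'accuracy' for _log_epoch:
#   from argparse import Namespace
#   from ignite.contrib.handlers import ProgressBar
#   from ignite.metrics import Accuracy, Loss
#   consts = Namespace(model_dir='models', metric_file='metrics.csv',
#                      early_stopping_criteria=5, num_epochs=20, device='cuda',
#                      checkpointer_prefix='run', checkpointer_name='net',
#                      save_every=1, save_total=3)
#   metrics = {'accuracy': Accuracy(), 'loss': Loss(mc.loss_fn)}
#   IgniteTrainer(mc, dc, consts, ProgressBar(), metrics).run()  # mc/dc built elsewhere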
| #!/usr/bin/env python
import csv
import datetime
from argparse import Namespace
from pathlib import Path
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import RunningAverage
from ignite.handlers import EarlyStopping, ModelCheckpoint, Timer
from ignite.contrib.handlers import ProgressBar
from .model import ModelContainer
from .dataset import DataContainer
class IgniteTrainer(object):
def __init__(self, mc: ModelContainer, dc: DataContainer, consts: Namespace, pbar:
ProgressBar, metrics: dict={}) -> None:
        # retrieve required constants from consts
self.model_dir = consts.model_dir
self.metrics_file = consts.metric_file
self.patience = consts.early_stopping_criteria
self.n_epochs = consts.num_epochs
self.device = consts.device
self.prefix = consts.checkpointer_prefix
self.model_name = consts.checkpointer_name
self.save_interval = consts.save_every
self.n_saved = consts.save_total
# get model and data details
self.model = mc.model
self.optimizer = mc.optimizer
self.scheduler = mc.scheduler
self.loss_fn = mc.loss_fn
self.train_dl = dc.train_dl
self.val_dl = dc.val_dl
# create trainers and evaluators
self.trainer = create_supervised_trainer(self.model, self.optimizer, self.loss_fn,
device=self.device)
self.train_eval = create_supervised_evaluator(self.model, metrics=metrics, device=self.device)
self.val_eval = create_supervised_evaluator(self.model, metrics=metrics, device=self.device)
# set loss to be shown in progress bar
self.pbar = pbar
RunningAverage(output_transform=lambda x: x).attach(self.trainer, 'loss')
self.pbar.attach(self.trainer, ['loss'])
# setup timers
self.epoch_timer = Timer(average=True)
self.epoch_timer.attach(self.trainer, start=Events.EPOCH_COMPLETED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
self.training_timer = Timer()
self.training_timer.attach(self.trainer)
# setup early stopping and checkpointer
early_stopping = EarlyStopping(patience=self.patience, score_function=self.score_fn,
trainer=self.trainer)
checkpointer = ModelCheckpoint(self.model_dir, self.prefix, require_empty=False,
save_interval=self.save_interval, n_saved=self.n_saved, save_as_state_dict=True)
# add all the event handlers
self.trainer.add_event_handler(Events.STARTED, self._open_csv)
self.trainer.add_event_handler(Events.EPOCH_COMPLETED, self._log_epoch)
self.trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {self.model_name:
self.model})
self.trainer.add_event_handler(Events.COMPLETED, self._close_csv)
# self.trainer.add_event_handler(Events.ITERATION_COMPLETED, self._log_training_loss)
self.val_eval.add_event_handler(Events.COMPLETED, early_stopping)
self.val_eval.add_event_handler(Events.COMPLETED, self._scheduler_step)
def _open_csv(self, engine):
self.fp = open(self.metrics_file, 'w')
self.writer = csv.writer(self.fp)
row = ['epoch', 'training_loss', 'training_acc', 'validation_loss', 'validation_acc']
self.writer.writerow(row)
def _scheduler_step(self, engine):
self.scheduler.step(engine.state.metrics['loss'])
def _log_training_loss(self, engine):
iteration = (engine.state.iteration-1) % len(self.train_dl) + 1
if iteration % 100 == 0:
self.pbar.log_message(f"ITERATION - loss: {engine.state.output:0.4f}")
def _log_epoch(self, engine):
self.epoch_timer.reset()
self.train_eval.run(self.train_dl)
self.val_eval.run(self.val_dl)
epoch = engine.state.epoch
        train_metric = self.train_eval.state.metrics
        valid_metric = self.val_eval.state.metrics
        train_loss = f"{train_metric['loss']:0.3f}"
        train_acc = f"{train_metric['accuracy']:0.3f}"
        valid_loss = f"{valid_metric['loss']:0.3f}"
        valid_acc = f"{valid_metric['accuracy']:0.3f}"
self.pbar.log_message(f"Epoch: {epoch}")
self.pbar.log_message(f"Training - Loss: {train_loss}, Accuracy: {train_acc}")
self.pbar.log_message(f"Validation - Loss: {valid_loss}, Accuracy: {valid_acc}")
self.pbar.log_message(f"Time per batch {self.epoch_timer.value():0.3f}[s]")
row = [epoch, f"{train_loss}", f"{train_acc}", f"{valid_loss}", f"{valid_acc}"]
self.writer.writerow(row)
def _close_csv(self, engine):
train_time = str(datetime.timedelta(seconds=self.training_timer.value()))
self.pbar.log_message(f"Training Done. Total training time: {train_time}")
self.fp.write(f"{train_time}\n")
self.fp.close()
def run(self):
self.trainer.run(self.train_dl, self.n_epochs)
@staticmethod
def score_fn(engine):
valid_loss = engine.state.metrics['loss']
return -valid_loss
|
#! /usr/bin/python3
import logging
import multiprocessing
from concurrent.futures.thread import ThreadPoolExecutor
from multiprocessing import Process
import statsd
import Adafruit_DHT
import time
import boto3
import sys
import subprocess
import os
from timeit import default_timer as timer
from threading import Thread
from threading import Lock
from queue import Queue
from dotenv import load_dotenv
load_dotenv()
DHT_PIN = 4
STATSD_ENDPOINT = os.environ['statsd_url']
statsd = statsd.StatsClient(STATSD_ENDPOINT, 8125, prefix='totomz.homelab')
skip_ipmi = dict()
q = Queue()
HOSTS = {
'zione': {'ipmi': False},
'ziobob': {'ipmi': '192.168.10.30', 'lock': Lock()},
'ziocharlie': {'ipmi': '192.168.10.31', 'lock': Lock()},
}
vgpulock = Lock()
sensorlock = Lock()
def str2float(string, default=0.0):
res = default
try:
res = float(string)
except Exception:
res = default
return res
def collect_sensor():
log = multiprocessing.get_logger()
log.info(" --> Collecting temperature and humidity")
global q
lock = sensorlock.acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting sensors :: still being queried....skipping")
return
try:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, DHT_PIN)
# humidity, temperature = 0, 1
finally:
sensorlock.release()
results = dict()
results['rack.humidity'] = humidity
results['rack.temperature'] = temperature
log.info(f" --> Temperature: {temperature} Humidity: {humidity}")
if len(results) > 0:
q.put(results)
def collect_ipmi():
log = multiprocessing.get_logger()
global q
results = dict()
log.info(" --> Collecting ipmi")
def ipmi_poll(hostname):
if skip_ipmi.get(hostname, 0) > 0:
print(f"Host {hostname} is in the skipped list")
skip_ipmi[hostname] = skip_ipmi.get(hostname, 0) - 1
return results
lock = HOSTS[hostname]['lock'].acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting ipmi :: {hostname} still being queried....skipping")
return
try:
log.info(f" --> Collecting ipmi :: {hostname} querying")
out = subprocess.check_output("ipmitool -P root -U root -H {ip} sensor".format(ip=HOSTS[hostname]['ipmi']),
stderr=subprocess.STDOUT,
shell=True)
stdout = str(out.decode('utf-8'))
log.info(f" --> Collecting ipmi :: {hostname} got readings")
metrics = stdout.split("\n")
for line in metrics:
metric_line = line.lower()
if "temp" not in metric_line:
continue
p = metric_line.split("|")
metric_name = f"host.{hostname}.{str.lower(str.strip(str.strip(p[0]))).replace(" ", "_")}"
metric_value = str2float(str.strip(p[1]), 0)
results[metric_name] = metric_value
except Exception as e:
step = 5
print(f"Error processing IPMI for {hostname} - slpeeping for {step} steps")
skip_ipmi[hostname] = step
finally:
HOSTS[hostname]['lock'].release()
with ThreadPoolExecutor(max_workers=2) as pool:
pool.map(ipmi_poll, ['ziobob', 'ziocharlie'])
log.info(" --> Collecting ipmi done")
if len(results) > 0:
q.put(results)
def collect_vgpu():
log = multiprocessing.get_logger()
global q
global vgpulock
hostname = "zione"
log.info(" --> Collecting vGPU")
results = dict()
lock = vgpulock.acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting vGPU :: still being queried....skipping")
return
try:
out = subprocess.check_output(f"ssh root@{hostname} \"nvidia-smi -q\"",
stderr=subprocess.STDOUT,
shell=True)
stdout = str(out.decode('utf-8'))
    except Exception as e:
        log.error("Error collecting vGPU metrics: %s", e)
        return  # nothing to parse; the lock is still released by the finally clause
    finally:
        vgpulock.release()
lines = stdout.split("\n")
current_gpu = None
def pop_metric(name_prefix):
m = lines.pop(0).lower().split(":")
metric_name = f"{name_prefix}.{m[0].strip().replace(" ", "_")}"
metric_value = m[1].split()[0].strip()
results[f"host.zione.gpu.{metric_name}"] = str2float(metric_value)
while len(lines):
line = lines.pop(0)
if line.startswith('GPU 0000:'):
current_gpu = line.split('GPU ')[1].split(':')[1]
if current_gpu is None:
continue
if line.startswith(" FB Memory Usage"):
pop_metric(f"{current_gpu}.memory.framebuffer") # total
pop_metric(f"{current_gpu}.memory.framebuffer") # used
pop_metric(f"{current_gpu}.memory.framebuffer") # free
if line.startswith(" BAR1 Memory Usage"):
pop_metric(f"{current_gpu}.memory.bar") # total
pop_metric(f"{current_gpu}.memory.bar") # used
pop_metric(f"{current_gpu}.memory.bar") # free
line = lines.pop(0)
if line.startswith(" Utilization"):
pop_metric(f"{current_gpu}.utilization") # gpu
pop_metric(f"{current_gpu}.utilization") # memory
pop_metric(f"{current_gpu}.utilization") # encoder
pop_metric(f"{current_gpu}.utilization") # decoder
line = lines.pop(0)
if line.startswith(" Temperature"):
pop_metric(f"{current_gpu}.temp") # gpu
if line.startswith(" Power Readings"):
lines.pop(0) # Skip Power Management
pop_metric(f"{current_gpu}.power") # Draw
if line == " Clocks":
pop_metric(f"{current_gpu}.power") # Graphics
pop_metric(f"{current_gpu}.power") # SM
pop_metric(f"{current_gpu}.power") # Memory
pop_metric(f"{current_gpu}.power") # Video
log.info(f" --> Collecting vGPU :: {len(results)}")
if len(results) > 0:
q.put(results)
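# --- Editor's note: sample of the `nvidia-smi -q` output parsed above ---
# pop_metric() consumes "key : value unit" lines, so an FB Memory block such as
#       FB Memory Usage
#           Total                             : 8192 MiB
#           Used                              : 1024 MiB
#           Free                              : 7168 MiB
# becomes gauges like host.zione.gpu.<id>.memory.framebuffer.total = 8192.0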
def statsd_writer():
log = multiprocessing.get_logger()
global q
while True:
log.info("Waiting for metrics")
metrics = q.get(block=True)
for k in metrics:
log.info(f":statsd {k} ==> {metrics[k]}")
statsd.gauge(k, metrics[k])
log.info(f"--> Bobmaaaa {len(metrics)}")
print("Starting temperature and humidity monitoring service....")
sys.stdout.flush()
if __name__ == '__main__':
log = multiprocessing.get_logger()
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(' --> [%(asctime)s] - %(processName)s - %(message)s'))
log.addHandler(handler)
log.info("# Starting statsd writer")
worker = Thread(target=statsd_writer)
worker.daemon = True # Die with your parent
worker.start()
while True:
log.info("# waking up workers")
for func in [
collect_vgpu,
collect_ipmi,
collect_sensor
]:
worker = Thread(target=func)
worker.daemon = True # Die with your parent
worker.start()
time.sleep(5)
| #! /usr/bin/python3
import logging
import multiprocessing
from concurrent.futures.thread import ThreadPoolExecutor
from multiprocessing import Process
import statsd
import Adafruit_DHT
import time
import boto3
import sys
import subprocess
import os
from timeit import default_timer as timer
from threading import Thread
from threading import Lock
from queue import Queue
from dotenv import load_dotenv
load_dotenv()
DHT_PIN = 4
STATSD_ENDPOINT = os.environ['statsd_url']
statsd = statsd.StatsClient(STATSD_ENDPOINT, 8125, prefix='totomz.homelab')
skip_ipmi = dict()
q = Queue()
HOSTS = {
'zione': {'ipmi': False},
'ziobob': {'ipmi': '192.168.10.30', 'lock': Lock()},
'ziocharlie': {'ipmi': '192.168.10.31', 'lock': Lock()},
}
vgpulock = Lock()
sensorlock = Lock()
def str2float(string, default=0.0):
res = default
try:
res = float(string)
except Exception:
res = default
return res
def collect_sensor():
log = multiprocessing.get_logger()
log.info(" --> Collecting temperature and humidity")
global q
lock = sensorlock.acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting sensors :: still being queried....skipping")
return
try:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, DHT_PIN)
# humidity, temperature = 0, 1
finally:
sensorlock.release()
results = dict()
results['rack.humidity'] = humidity
results['rack.temperature'] = temperature
log.info(f" --> Temperature: {temperature} Humidity: {humidity}")
if len(results) > 0:
q.put(results)
def collect_ipmi():
log = multiprocessing.get_logger()
global q
results = dict()
log.info(" --> Collecting ipmi")
def ipmi_poll(hostname):
if skip_ipmi.get(hostname, 0) > 0:
print(f"Host {hostname} is in the skipped list")
skip_ipmi[hostname] = skip_ipmi.get(hostname, 0) - 1
return results
lock = HOSTS[hostname]['lock'].acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting ipmi :: {hostname} still being queried....skipping")
return
try:
log.info(f" --> Collecting ipmi :: {hostname} querying")
out = subprocess.check_output("ipmitool -P root -U root -H {ip} sensor".format(ip=HOSTS[hostname]['ipmi']),
stderr=subprocess.STDOUT,
shell=True)
stdout = str(out.decode('utf-8'))
log.info(f" --> Collecting ipmi :: {hostname} got readings")
metrics = stdout.split("\n")
for line in metrics:
metric_line = line.lower()
if "temp" not in metric_line:
continue
p = metric_line.split("|")
metric_name = f"host.{hostname}.{str.lower(str.strip(str.strip(p[0]))).replace(' ', '_')}"
metric_value = str2float(str.strip(p[1]), 0)
results[metric_name] = metric_value
        except Exception as e:
            step = 5
            print(f"Error processing IPMI for {hostname} ({e}) - sleeping for {step} polling cycles")
            skip_ipmi[hostname] = step
finally:
HOSTS[hostname]['lock'].release()
with ThreadPoolExecutor(max_workers=2) as pool:
pool.map(ipmi_poll, ['ziobob', 'ziocharlie'])
log.info(" --> Collecting ipmi done")
if len(results) > 0:
q.put(results)
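# --- Editor's sketch, not part of the original script: how a single
# `ipmitool sensor` output line maps to a statsd metric in ipmi_poll above.
# The sample line and hostname are hypothetical.
def _demo_parse_ipmi_line():
    line = "CPU Temp         | 42.000     | degrees C  | ok"
    p = line.lower().split("|")
    metric_name = f"host.ziobob.{p[0].strip().replace(' ', '_')}"
    metric_value = str2float(p[1].strip(), 0)
    assert metric_name == "host.ziobob.cpu_temp"
    assert metric_value == 42.0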
def collect_vgpu():
log = multiprocessing.get_logger()
global q
global vgpulock
hostname = "zione"
log.info(" --> Collecting vGPU")
results = dict()
lock = vgpulock.acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting vGPU :: still being queried....skipping")
return
try:
out = subprocess.check_output(f"ssh root@{hostname} \"nvidia-smi -q\"",
stderr=subprocess.STDOUT,
shell=True)
stdout = str(out.decode('utf-8'))
    except Exception as e:
        log.error("Error collecting vGPU: %s", e)
        return
    finally:
        vgpulock.release()
    lines = stdout.split("\n")
current_gpu = None
def pop_metric(name_prefix):
m = lines.pop(0).lower().split(":")
metric_name = f"{name_prefix}.{m[0].strip().replace(' ', '_')}"
metric_value = m[1].split()[0].strip()
results[f"host.zione.gpu.{metric_name}"] = str2float(metric_value)
while len(lines):
line = lines.pop(0)
if line.startswith('GPU 0000:'):
current_gpu = line.split('GPU ')[1].split(':')[1]
if current_gpu is None:
continue
if line.startswith(" FB Memory Usage"):
pop_metric(f"{current_gpu}.memory.framebuffer") # total
pop_metric(f"{current_gpu}.memory.framebuffer") # used
pop_metric(f"{current_gpu}.memory.framebuffer") # free
if line.startswith(" BAR1 Memory Usage"):
pop_metric(f"{current_gpu}.memory.bar") # total
pop_metric(f"{current_gpu}.memory.bar") # used
pop_metric(f"{current_gpu}.memory.bar") # free
line = lines.pop(0)
if line.startswith(" Utilization"):
pop_metric(f"{current_gpu}.utilization") # gpu
pop_metric(f"{current_gpu}.utilization") # memory
pop_metric(f"{current_gpu}.utilization") # encoder
pop_metric(f"{current_gpu}.utilization") # decoder
line = lines.pop(0)
if line.startswith(" Temperature"):
pop_metric(f"{current_gpu}.temp") # gpu
if line.startswith(" Power Readings"):
lines.pop(0) # Skip Power Management
pop_metric(f"{current_gpu}.power") # Draw
if line == " Clocks":
pop_metric(f"{current_gpu}.power") # Graphics
pop_metric(f"{current_gpu}.power") # SM
pop_metric(f"{current_gpu}.power") # Memory
pop_metric(f"{current_gpu}.power") # Video
log.info(f" --> Collecting vGPU :: {len(results)}")
if len(results) > 0:
q.put(results)
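# --- Editor's sketch, not part of the original script: pop_metric consumes one
# "Key : Value Unit" line from `nvidia-smi -q` per call; the two sample lines
# below are illustrative, real output contains many more fields.
def _demo_pop_metric():
    lines = [
        "        Total                             : 4096 MiB",
        "        Used                              : 1024 MiB",
    ]
    results = {}

    def pop_metric(name_prefix):
        m = lines.pop(0).lower().split(":")
        metric_name = f"{name_prefix}.{m[0].strip().replace(' ', '_')}"
        results[f"host.zione.gpu.{metric_name}"] = str2float(m[1].split()[0].strip())

    pop_metric("00.memory.framebuffer")  # -> host.zione.gpu.00.memory.framebuffer.total = 4096.0
    pop_metric("00.memory.framebuffer")  # -> host.zione.gpu.00.memory.framebuffer.used = 1024.0
    return results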
def statsd_writer():
log = multiprocessing.get_logger()
global q
while True:
log.info("Waiting for metrics")
metrics = q.get(block=True)
for k in metrics:
log.info(f":statsd {k} ==> {metrics[k]}")
statsd.gauge(k, metrics[k])
log.info(f"--> Bobmaaaa {len(metrics)}")
print("Starting temperature and humidity monitoring service....")
sys.stdout.flush()
if __name__ == '__main__':
log = multiprocessing.get_logger()
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(' --> [%(asctime)s] - %(processName)s - %(message)s'))
log.addHandler(handler)
log.info("# Starting statsd writer")
worker = Thread(target=statsd_writer)
worker.daemon = True # Die with your parent
worker.start()
while True:
log.info("# waking up workers")
for func in [
collect_vgpu,
collect_ipmi,
collect_sensor
]:
worker = Thread(target=func)
worker.daemon = True # Die with your parent
worker.start()
time.sleep(5)
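# --- Editor's note, not part of the original script: the main loop re-spawns all
# three collectors every 5 seconds as daemon threads; the non-blocking
# Lock.acquire(blocking=False) guards make a still-running poll skip the new
# request instead of stacking threads, and the shared Queue decouples the
# collectors (producers) from the single statsd_writer (consumer). A minimal
# sketch of the same producer/consumer shape, with hypothetical names:
#
#     work_q = Queue()
#     def producer():            # plays the role of collect_*
#         work_q.put({"some.metric": 1.0})
#     def consumer():            # plays the role of statsd_writer
#         while True:
#             for name, value in work_q.get(block=True).items():
#                 statsd.gauge(name, value)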
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
import numpy as np
import torch
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode):
self.trainer = trainer
self.early_stopping_accumulator = None
self.checkpoint_accumulator = None
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self.automatic_optimization = True
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
def on_trainer_init(
self,
max_epochs,
min_epochs,
max_steps,
min_steps,
num_sanity_val_steps,
automatic_optimization,
weights_summary,
):
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.interrupted = False
self.trainer.should_stop = False
self.trainer._state = TrainerState.INITIALIZING
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
self.automatic_optimization = automatic_optimization
        # If neither max_epochs nor max_steps is set, then use existing default of max_epochs = 1000
        self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        # If neither min_epochs nor min_steps is set, then use existing default of min_epochs = 1
        self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
self.trainer.weights_summary = weights_summary
if weights_summary is not None and weights_summary not in ModelSummary.MODES:
raise MisconfigurationException(
f"`weights_summary` can be None, {", ".join(ModelSummary.MODES)}, got {weights_summary}"
)
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
# provide rank to profiler
self.trainer.profile_connector.on_train_start(self.trainer)
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# links data to the trainer
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
# check that model is configured correctly
self.trainer.config_validator.verify_loop_configurations(model)
# attach model log function to callback
self.trainer.callback_connector.attach_model_logging_functions(model)
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
        # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
        # It might be related to xla tensors blocked when moving to the cpu
# kill loggers
if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:
self.trainer.logger.finalize("success")
# summarize profile results
if self.trainer.global_rank == 0:
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def check_early_stopping_callback(self, should_update):
# TODO bake this logic into the EarlyStopping callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.get_model()
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
        # adjust gradient accumulation according to the accumulation_scheduler
self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# structured result accumulators for callbacks
self.early_stopping_accumulator = Accumulator()
self.checkpoint_accumulator = Accumulator()
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
# hook
self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
hook_overridden = (
is_overridden("training_epoch_end", model=self.trainer.get_model())
or is_overridden("on_train_epoch_end", model=self.trainer.get_model())
)
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
        # find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
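    # --- Editor's worked example, not part of the original class: with
    # hypothetical optimizer_frequencies = [2, 1], cumsum is [2, 3], so the loop
    # length is 3 and total_batch_idx selects optimizers in the pattern 0, 0, 1:
    #
    #     freqs = np.cumsum([2, 1])                                  # array([2, 3])
    #     [int(np.argmax(freqs > i % freqs[-1])) for i in range(6)]  # [0, 0, 1, 0, 0, 1]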
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
is_result_obj = isinstance(training_step_output, Result)
if is_result_obj:
training_step_output.detach()
else:
training_step_output.batch_loss = training_step_output.batch_loss.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator_backend.training_step(args)
self.trainer.accelerator_backend.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
is_result_obj = isinstance(training_step_output, Result)
if training_step_output_for_epoch_end is None:
return None
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.train_loop.automatic_optimization:
# accumulate loss
# (if accumulate_grad_batches = 1 no effect)
if is_result_obj:
closure_loss = training_step_output.minimize
else:
closure_loss = training_step_output.batch_loss
closure_loss = closure_loss / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
hiddens=training_step_output.hiddens,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
# -----------------------------------------
# process result return (DEPRECATE in 1.0)
# -----------------------------------------
if isinstance(training_step_output, Result):
training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
return training_step_output_for_epoch_end, training_step_output
# -----------------------------------------
# process hybrid (1.0)
# -----------------------------------------
# no need for these checks in 1.0.0
# TODO: remove checks in 1.0.0
is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
if is_1_0_output:
return self._process_training_step_output_1_0(training_step_output, split_batch)
# -----------------------------------------
# process old dict (deprecate 1.0)
# -----------------------------------------
training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
training_step_output = AttributeDict(
batch_loss=training_step_output[0],
pbar_on_batch_end=training_step_output[1],
log_metrics=training_step_output[2],
callback_metrics=training_step_output[3],
hiddens=training_step_output[4],
)
# if the user decides to finally reduce things in epoch_end, save raw output without graphs
if isinstance(training_step_output_for_epoch_end, torch.Tensor):
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
else:
training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
return training_step_output_for_epoch_end, training_step_output
def _process_training_step_output_1_0(self, training_step_output, split_batch):
result = self.trainer.get_model()._results
loss = None
hiddens = None
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
result["extra"] = {}
# map to results under the hood
result.minimize = loss
result.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end.cpu()
# what flows back into the system
training_step_output = result
return training_step_output_for_epoch_end, training_step_output
def _process_result(self, training_step_output, split_batch):
training_step_output.track_batch_size(len(split_batch))
m = """
        TrainResult and EvalResult were deprecated in 0.9.1 and support will be dropped in 1.0.0.
Use self.log and .write from the LightningModule to log metrics and write predictions.
training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
Option 1:
return loss
Option 2:
return {'loss': loss, 'anything_else': ...}
Option 3:
return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
"""
rank_zero_warn(m)
training_step_output_for_epoch_end = copy(training_step_output)
training_step_output_for_epoch_end.detach()
return training_step_output_for_epoch_end
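    # --- Editor's sketch, not part of the original class: the 1.0-style returns
    # that the deprecation message above recommends, inside a hypothetical
    # LightningModule:
    #
    #     def training_step(self, batch, batch_idx):
    #         loss = self.compute_loss(batch)               # hypothetical helper
    #         self.log("train_loss", loss)
    #         return {"loss": loss, "anything_else": 123}   # or simply: return loss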
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.get_model()
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
def process_hiddens(self, opt_closure_result):
hiddens = opt_closure_result.hiddens
if isinstance(opt_closure_result.training_step_output, Result):
opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
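    # --- Editor's sketch, not part of the original class: with
    # truncated_bptt_steps=2, the default LightningModule.tbptt_split_batch
    # chunks a batch along the time dimension; hypothetical shapes:
    #
    #     x = torch.zeros(32, 4, 10)             # (batch, time, features)
    #     splits = [x[:, 0:2, :], x[:, 2:4, :]]  # two chunks of 2 time steps each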
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
should_check_val = False
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
batch_end_outputs = self.process_train_step_outputs(
batch_output.training_step_output_for_epoch_end,
self.early_stopping_accumulator,
self.checkpoint_accumulator,
)
# hook
# TODO: add outputs to batches
self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
# -----------------------------------------
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.run_evaluation()
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if (
self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1
and self._accumulated_batches_reached()
):
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if self._num_training_batches_reached(is_last_batch):
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
# epoch end hook
self.run_on_epoch_end_hook(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(
epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers
)
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
if should_check_val:
self.trainer.run_evaluation(on_epoch=True)
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
should_train_only = self.trainer.disable_validation or should_skip_eval
if should_train_only:
# update epoch level lr_schedulers
self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
self.check_checkpoint_callback(True)
self.check_early_stopping_callback(True)
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dic = {}
# bookkeeping
self.trainer.hiddens = None
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]
if batch is None:
return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self.tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in self.prepare_optimizers():
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
                    # automatic_optimization=True: perform ddp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch, batch_idx, opt_idx, self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
                # todo: Properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
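    # --- Editor's sketch, not part of the original class: the same shape in raw
    # PyTorch DDP, where no_sync() skips the gradient all-reduce on accumulation
    # steps (names below are hypothetical):
    #
    #     if accumulating:                # cf. should_accumulate() below
    #         with ddp_model.no_sync():   # torch.nn.parallel.DistributedDataParallel
    #             loss.backward()         # grads accumulate locally, no all-reduce
    #     else:
    #         loss.backward()             # last micro-batch: grads are synced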
def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# track hiddens
self.trainer.hiddens = self.process_hiddens(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if result is None:
if self.automatic_optimization:
                    self.warning_cache.warn("training_step returned None. If it was on purpose, ignore this warning...")
return None
if not self._skip_backward and self.trainer.train_loop.automatic_optimization:
# backward pass
with self.trainer.profiler.profile("model_backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(result.loss)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.get_model().untoggle_optimizer(opt_idx)
return result
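    # --- Editor's sketch, not part of the original class: why the closure
    # matters for second-order methods -- torch.optim.LBFGS may re-evaluate the
    # loss several times per step, so it needs a callable rather than a
    # precomputed value (model, loss_fn, x, y below are hypothetical):
    #
    #     optimizer = torch.optim.LBFGS(model.parameters())
    #     def closure():
    #         optimizer.zero_grad()
    #         loss = loss_fn(model(x), y)
    #         loss.backward()
    #         return loss
    #     optimizer.step(closure)   # LBFGS calls closure() as often as it needs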
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator_backend.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.call_hook('on_epoch_end')
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
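    # --- Editor's worked example, not part of the original class: with
    # accumulate_grad_batches=4 and num_training_batches=10, (batch_idx + 1) % 4
    # hits 0 at batch_idx 3 and 7, and batch_idx 9 is the final batch, so
    # should_accumulate() is False at batch_idx 3, 7, 9 (optimizer steps there)
    # and True at batch_idx 0, 1, 2, 4, 5, 6, 8 (backward only).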
def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches
should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop
or is_last_batch_for_infinite_dataset
) if on_epoch else (is_val_check_batch and not epoch_end_val_check)
return should_check_val and can_check_val
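    # --- Editor's worked example, not part of the original class: with
    # check_val_every_n_epoch=1, val_check_batch=50 and num_training_batches=200,
    # the in-loop (on_epoch=False) path validates at batch_idx 49, 99, 149, 199;
    # if instead val_check_batch == num_training_batches, the in-loop path is
    # suppressed and validation runs once via the on_epoch=True call after the
    # batch loop.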
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# enable not needing to add opt_idx to training_step
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
"""
Figure out what needs to be tracked/logged at the end of the epoch
"""
# the training step outputs a list per optimizer. The list contains the outputs at each time step
        # when no TBPTT is used, the list has 1 item per batch
        # when TBPTT IS used, the list has n items (1 per time step)
batch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
batch_end_outputs.append(optimizer_idx_outputs)
return batch_end_outputs
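    # --- Editor's sketch, not part of the original class: shape of the incoming
    # outputs with two optimizers and TBPTT over two time steps (placeholder
    # dicts stand in for Result objects):
    #
    #     all_train_step_outputs = [
    #         [{"loss": ...}, {"loss": ...}],  # optimizer 0: one item per split
    #         [{"loss": ...}, {"loss": ...}],  # optimizer 1
    #     ]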
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
| # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
import numpy as np
import torch
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode):
self.trainer = trainer
self.early_stopping_accumulator = None
self.checkpoint_accumulator = None
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self.automatic_optimization = True
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
def on_trainer_init(
self,
max_epochs,
min_epochs,
max_steps,
min_steps,
num_sanity_val_steps,
automatic_optimization,
weights_summary,
):
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.interrupted = False
self.trainer.should_stop = False
self.trainer._state = TrainerState.INITIALIZING
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
self.automatic_optimization = automatic_optimization
        # If neither max_epochs nor max_steps is set, then use existing default of max_epochs = 1000
        self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        # If neither min_epochs nor min_steps is set, then use existing default of min_epochs = 1
        self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
self.trainer.weights_summary = weights_summary
if weights_summary is not None and weights_summary not in ModelSummary.MODES:
raise MisconfigurationException(
f"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}"
)
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
# provide rank to profiler
self.trainer.profile_connector.on_train_start(self.trainer)
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# links data to the trainer
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
# check that model is configured correctly
self.trainer.config_validator.verify_loop_configurations(model)
# attach model log function to callback
self.trainer.callback_connector.attach_model_logging_functions(model)
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
        # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
        # It might be related to xla tensors blocked when moving to the cpu
# kill loggers
if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:
self.trainer.logger.finalize("success")
# summarize profile results
if self.trainer.global_rank == 0:
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def check_early_stopping_callback(self, should_update):
# TODO bake this logic into the EarlyStopping callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.get_model()
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
        # adjust gradient accumulation according to the accumulation_scheduler
self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# structured result accumulators for callbacks
self.early_stopping_accumulator = Accumulator()
self.checkpoint_accumulator = Accumulator()
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
# hook
self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
hook_overridden = (
is_overridden("training_epoch_end", model=self.trainer.get_model())
or is_overridden("on_train_epoch_end", model=self.trainer.get_model())
)
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
        # find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
is_result_obj = isinstance(training_step_output, Result)
if is_result_obj:
training_step_output.detach()
else:
training_step_output.batch_loss = training_step_output.batch_loss.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator_backend.training_step(args)
self.trainer.accelerator_backend.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
is_result_obj = isinstance(training_step_output, Result)
if training_step_output_for_epoch_end is None:
return None
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.train_loop.automatic_optimization:
# accumulate loss
# (if accumulate_grad_batches = 1 no effect)
if is_result_obj:
closure_loss = training_step_output.minimize
else:
closure_loss = training_step_output.batch_loss
closure_loss = closure_loss / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
hiddens=training_step_output.hiddens,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
# -----------------------------------------
# process result return (DEPRECATE in 1.0)
# -----------------------------------------
if isinstance(training_step_output, Result):
training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
return training_step_output_for_epoch_end, training_step_output
# -----------------------------------------
# process hybrid (1.0)
# -----------------------------------------
# no need for these checks in 1.0.0
# TODO: remove checks in 1.0.0
is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
if is_1_0_output:
return self._process_training_step_output_1_0(training_step_output, split_batch)
# -----------------------------------------
# process old dict (deprecate 1.0)
# -----------------------------------------
training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
training_step_output = AttributeDict(
batch_loss=training_step_output[0],
pbar_on_batch_end=training_step_output[1],
log_metrics=training_step_output[2],
callback_metrics=training_step_output[3],
hiddens=training_step_output[4],
)
# if the user decides to finally reduce things in epoch_end, save raw output without graphs
if isinstance(training_step_output_for_epoch_end, torch.Tensor):
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
else:
training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
return training_step_output_for_epoch_end, training_step_output
def _process_training_step_output_1_0(self, training_step_output, split_batch):
result = self.trainer.get_model()._results
loss = None
hiddens = None
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
result["extra"] = {}
# map to results under the hood
result.minimize = loss
result.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end.cpu()
# what flows back into the system
training_step_output = result
return training_step_output_for_epoch_end, training_step_output
def _process_result(self, training_step_output, split_batch):
training_step_output.track_batch_size(len(split_batch))
m = """
        TrainResult and EvalResult were deprecated in 0.9.1 and support will be dropped in 1.0.0.
Use self.log and .write from the LightningModule to log metrics and write predictions.
training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
Option 1:
return loss
Option 2:
return {'loss': loss, 'anything_else': ...}
Option 3:
return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
"""
rank_zero_warn(m)
training_step_output_for_epoch_end = copy(training_step_output)
training_step_output_for_epoch_end.detach()
return training_step_output_for_epoch_end
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.get_model()
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
def process_hiddens(self, opt_closure_result):
hiddens = opt_closure_result.hiddens
if isinstance(opt_closure_result.training_step_output, Result):
opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
should_check_val = False
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
batch_end_outputs = self.process_train_step_outputs(
batch_output.training_step_output_for_epoch_end,
self.early_stopping_accumulator,
self.checkpoint_accumulator,
)
# hook
# TODO: add outputs to batches
self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
# -----------------------------------------
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.run_evaluation()
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if (
self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1
and self._accumulated_batches_reached()
):
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if self._num_training_batches_reached(is_last_batch):
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
# epoch end hook
self.run_on_epoch_end_hook(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(
epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers
)
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
if should_check_val:
self.trainer.run_evaluation(on_epoch=True)
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
should_train_only = self.trainer.disable_validation or should_skip_eval
if should_train_only:
# update epoch level lr_schedulers
self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
self.check_checkpoint_callback(True)
self.check_early_stopping_callback(True)
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
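# return signal semantics: 0 = batch processed normally, -1 = a hook
# requested that the epoch end early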
# track grad norms
grad_norm_dic = {}
# bookkeeping
self.trainer.hiddens = None
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]
if batch is None:
return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self.tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in self.prepare_optimizers():
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
# automatic_optimization=True: perform ddp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch, batch_idx, opt_idx, self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# todo: Properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# track hiddens
self.trainer.hiddens = self.process_hiddens(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if result is None:
if self.automatic_optimization:
self.warning_cache.warn("training_step returned None. If this was on purpose, ignore this warning...")
return None
if not self._skip_backward and self.trainer.train_loop.automatic_optimization:
# backward pass
with self.trainer.profiler.profile("model_backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(result.loss)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.get_model().untoggle_optimizer(opt_idx)
return result
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator_backend.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.call_hook('on_epoch_end')
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# decide whether to only run backward (accumulate) or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
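# Worked example (assuming accumulate_grad_batches=4, num_training_batches=10):
# batch_idx 0-2 -> True (accumulate only), batch_idx 3 -> False (optimizer step),
# batch_idx 9 -> False (the final batch always steps, even though (9+1) % 4 != 0)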
def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches
should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop
or is_last_batch_for_infinite_dataset
) if on_epoch else (is_val_check_batch and not epoch_end_val_check)
return should_check_val and can_check_val
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# allow training_step to omit optimizer_idx when only one optimizer is used
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
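# e.g. with two optimizers and truncated BPTT enabled, the resulting call is
# training_step(batch, batch_idx, optimizer_idx, hiddens)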
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
"""
Figure out what needs to be tracked/logged at the end of the epoch
"""
# the training step outputs a list per optimizer. The list contains the outputs at each time step
# when no TBPTT is used, the list has 1 item per batch
# when TBPTT is used, the list has n items (1 per time step)
batch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
batch_end_outputs.append(optimizer_idx_outputs)
return batch_end_outputs
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
|
from output import log_qs_summary, log_query_results
from display import display_qs_summary, display_qs_results
from executors import ParallelQSExecutor, SerialQSExecutor
class TestMode():
def __init__(self,config, options):
self.config = config
self.options = options
self.verbose = options['verbose']
self.log_output = options['log_output']
def run_queryset(self, target, query_set):
if 'execution_mode' in target:
mode = target['execution_mode']
else:
mode = 'serial'
if mode == 'parallel':
executor = ParallelQSExecutor(target, query_set)
elif mode == 'serial':
executor = SerialQSExecutor(target, query_set)
else:
print(f"Unexpected query set execution mode {mode}")
return None
return executor.run()
def obfuscate_apikey(self, config):
# We obfuscate the apikey once we are done executing any queries to prevent it from being leaked in any reports
last4 = config['target']['api_key'][-4:]
config['target']['api_key'] = '******' + last4
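# e.g. a hypothetical api_key 'abcdef123456' becomes '******3456'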
def summarize_qs_results(self, config, results):
total, query, queued, network = 0,0,0,0
warnings = []
clean = True
for result in results['query_results']:
if result['status'] == 'success':
total += result['round_trip_ms']
query += result['query_ms']
queued += round(result['queued_ns']/1000)
network += result['network_ms']
if result['row_count'] == 0:
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Returned no rows'
warnings.append(warning)
elif result['status'] == 'error':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = f"Errored with message: {result["message"]}"
warnings.append(warning)
elif result['status'] == 'timeout':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Query timed out'
warnings.append(warning)
elif result['status'] == 'exhausted':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Resources exhausted'
warnings.append(warning)
return {
'total_ms': total,
'query_ms': query,
'queued_ms': queued,
'network_ms': network,
'warnings': warnings,
'clean': clean
}
class QPSTestMode(TestMode):
def __init__(self,config, options):
super().__init__(config,options)
class IterationsTestMode(TestMode):
def __init__(self,config, options):
super().__init__(config,options)
def run(self):
query_results = self.run_queryset(self.config['target'], self.config['queries'])
self.obfuscate_apikey(self.config)
query_set_summary = self.summarize_qs_results(self.config, query_results)
if self.verbose:
display_qs_results(self.config, query_results)
display_qs_summary(self.config, query_set_summary)
if self.log_output:
log_query_results(self.options, self.config, query_results)
log_qs_summary(self.options, self.config, query_set_summary)
| from output import log_qs_summary, log_query_results
from display import display_qs_summary, display_qs_results
from executors import ParallelQSExecutor, SerialQSExecutor
class TestMode():
def __init__(self,config, options):
self.config = config
self.options = options
self.verbose = options['verbose']
self.log_output = options['log_output']
def run_queryset(self, target, query_set):
if 'execution_mode' in target:
mode = target['execution_mode']
else:
mode = 'serial'
if mode == 'parallel':
executor = ParallelQSExecutor(target, query_set)
elif mode == 'serial':
executor = SerialQSExecutor(target, query_set)
else:
print(f"Unexpected query set execution mode {mode}")
return None
return executor.run()
def obfuscate_apikey(self, config):
# We obfuscate the apikey once we are done executing any queries to prevent it from being leaked in any reports
last4 = config['target']['api_key'][-4:]
config['target']['api_key'] = '******' + last4
def summarize_qs_results(self, config, results):
total, query, queued, network = 0,0,0,0
warnings = []
clean = True
for result in results['query_results']:
if result['status'] == 'success':
total += result['round_trip_ms']
query += result['query_ms']
queued += round(result['queued_ns']/1000)
network += result['network_ms']
if result['row_count'] == 0:
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Returned no rows'
warnings.append(warning)
elif result['status'] == 'error':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = f"Errored with message: {result['message']}"
warnings.append(warning)
elif result['status'] == 'timeout':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Query timed out'
warnings.append(warning)
elif result['status'] == 'exhausted':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Resources exhausted'
warnings.append(warning)
return {
'total_ms': total,
'query_ms': query,
'queued_ms': queued,
'network_ms': network,
'warnings': warnings,
'clean': clean
}
class QPSTestMode(TestMode):
def __init__(self,config, options):
super().__init__(config,options)
class IterationsTestMode(TestMode):
def __init__(self,config, options):
super().__init__(config,options)
def run(self):
query_results = self.run_queryset(self.config['target'], self.config['queries'])
self.obfuscate_apikey(self.config)
query_set_summary = self.summarize_qs_results(self.config, query_results)
if self.verbose:
display_qs_results(self.config, query_results)
display_qs_summary(self.config, query_set_summary)
if self.log_output:
log_query_results(self.options, self.config, query_results)
log_qs_summary(self.options, self.config, query_set_summary)
|
"""
difference.py
Visualize the difference between the python and the cython file.
"""
import difflib
import os
# name, path to python file, path to cython file
files = [
('multi environment', 'environment/env_multi.py', 'environment/cy/env_multi_cy.pyx'),
('game', 'environment/entities/game.py', 'environment/entities/cy/game_cy.pyx'),
('robots', 'environment/entities/robots.py', 'environment/entities/cy/robots_cy.pyx'),
('sensors', 'environment/entities/sensors.py', 'environment/entities/cy/sensors_cy.pyx'),
('intersection', 'utils/intersection.py', 'utils/cy/intersection_cy.pyx'),
('line2d', 'utils/line2d.py', 'utils/cy/line2d_cy.pyx'),
('vec2d', 'utils/vec2d.py', 'utils/cy/vec2d_cy.pyx'),
('test drive', 'tests/drive_test.py', 'tests/cy/drive_test_cy.py'),
('test intersection', 'tests/intersection_test.py', 'tests/cy/intersection_test_cy.py'),
('test sensors', 'tests/sensors_test.py', 'tests/cy/sensors_test_cy.py'),
]
def match(python_file, cython_file):
"""
Match the cython-file to the (original) python file.
:param python_file: List of lines of the python file
:param cython_file: List of lines of the cython file
:return: Difference-lists
"""
# Get git-wise diff file
diff = difflib.unified_diff(python_file, cython_file, fromfile='py', tofile='cy', lineterm='')
lines = [l for l in diff][2:]
# Python-code (minus)
py = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
py.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
# Add if necessary
if l[0] == '-': py[-1] += f'\n{l[1:]}' if len(py[-1]) > 0 else f'{l[1:]}'
# Cython-code (plus)
cy = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
cy.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
# Add if necessary
if l[0] == '+': cy[-1] += f'\n{l[1:]}' if len(cy[-1]) > 0 else f'{l[1:]}'
# Both lists must be equally long
assert len(py) == len(cy)
# Remove empty segments
to_remove = []
for i_block in range(len(py)):
if (py[i_block].replace(" ", "") == "") and (cy[i_block].replace(" ", "") == ""): to_remove.append(i_block)
for rm in reversed(to_remove):
del py[rm]
del cy[rm]
return py, cy
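# Worked example (header lines already stripped): for the diff lines
# ['-a', '-b', '+x', ' c', '-d'] this returns py == ['a\nb', 'd'] and
# cy == ['x', ''] (the trailing empty cy segment pairs with the lone '-d')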
def pretty_print(py, cy):
"""Pretty print the two lists."""
# Unroll the diff-blocks
py_unrolled = [line.split("\n") for line in py]
cy_unrolled = [line.split("\n") for line in cy]
# Define the maximum length of a single line for both the py and cy segments
max_py = max({len(line) for block in py_unrolled for line in block})
max_cy = max({len(line) for block in cy_unrolled for line in block})
# Enlarge the blocks such that they contain an equal amount of lines
for i_block in range(len(py_unrolled)):
while len(py_unrolled[i_block]) > len(cy_unrolled[i_block]):
cy_unrolled[i_block].append("")
while len(py_unrolled[i_block]) < len(cy_unrolled[i_block]):
py_unrolled[i_block].append("")
assert len(py_unrolled[i_block]) == len(cy_unrolled[i_block])
# Print out the differences
print(f"{"PYTHON":^{max_py}} | {"CYTHON":^{max_cy}}")
print("-" * (max_py + 3 + max_cy))
for i_block in range(len(py_unrolled)):
for i_line in range(len(py_unrolled[i_block])):
print(f"{py_unrolled[i_block][i_line]:{max_py}} | {cy_unrolled[i_block][i_line]:{max_cy}}")
print("-" * (max_py + 3 + max_cy))
if __name__ == '__main__':
os.chdir("..")
for name, f_py, f_cy in files:
print(f"\n\n\n==> ANALYZING: {name}\n")
# Load in the files as a list, split on the new-line symbol
with open(f_py, 'r') as f:
contents_py = f.read().split('\n')
with open(f_cy, 'r') as f:
contents_cy = f.read().split('\n')
# Match the two files with each other
diff_py, diff_cy = match(contents_py, contents_cy)
# Pretty print the difference of the two files
pretty_print(diff_py, diff_cy)
| """
difference.py
Visualize the difference between the python and the cython file.
"""
import difflib
import os
# name, path to python file, path to cython file
files = [
('multi environment', 'environment/env_multi.py', 'environment/cy/env_multi_cy.pyx'),
('game', 'environment/entities/game.py', 'environment/entities/cy/game_cy.pyx'),
('robots', 'environment/entities/robots.py', 'environment/entities/cy/robots_cy.pyx'),
('sensors', 'environment/entities/sensors.py', 'environment/entities/cy/sensors_cy.pyx'),
('intersection', 'utils/intersection.py', 'utils/cy/intersection_cy.pyx'),
('line2d', 'utils/line2d.py', 'utils/cy/line2d_cy.pyx'),
('vec2d', 'utils/vec2d.py', 'utils/cy/vec2d_cy.pyx'),
('test drive', 'tests/drive_test.py', 'tests/cy/drive_test_cy.py'),
('test intersection', 'tests/intersection_test.py', 'tests/cy/intersection_test_cy.py'),
('test sensors', 'tests/sensors_test.py', 'tests/cy/sensors_test_cy.py'),
]
def match(python_file, cython_file):
"""
Match the cython-file to the (original) python file.
:param python_file: List of lines of the python file
:param cython_file: List of lines of the cython file
:return: Difference-lists
"""
# Get git-wise diff file
diff = difflib.unified_diff(python_file, cython_file, fromfile='py', tofile='cy', lineterm='')
lines = [l for l in diff][2:]
# Python-code (minus)
py = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
py.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
# Add if necessary
if l[0] == '-': py[-1] += f'\n{l[1:]}' if len(py[-1]) > 0 else f'{l[1:]}'
# Cython-code (plus)
cy = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
cy.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
# Add if necessary
if l[0] == '+': cy[-1] += f'\n{l[1:]}' if len(cy[-1]) > 0 else f'{l[1:]}'
# Both lists must be equally long
assert len(py) == len(cy)
# Remove empty segments
to_remove = []
for i_block in range(len(py)):
if (py[i_block].replace(" ", "") == "") and (cy[i_block].replace(" ", "") == ""): to_remove.append(i_block)
for rm in reversed(to_remove):
del py[rm]
del cy[rm]
return py, cy
def pretty_print(py, cy):
"""Pretty print the two lists."""
# Unroll the diff-blocks
py_unrolled = [line.split("\n") for line in py]
cy_unrolled = [line.split("\n") for line in cy]
# Define the maximum length of a single line for both the py and cy segments
max_py = max({len(line) for block in py_unrolled for line in block})
max_cy = max({len(line) for block in cy_unrolled for line in block})
# Enlarge the blocks such that they contain an equal amount of lines
for i_block in range(len(py_unrolled)):
while len(py_unrolled[i_block]) > len(cy_unrolled[i_block]):
cy_unrolled[i_block].append("")
while len(py_unrolled[i_block]) < len(cy_unrolled[i_block]):
py_unrolled[i_block].append("")
assert len(py_unrolled[i_block]) == len(cy_unrolled[i_block])
# Print out the differences
print(f"{'PYTHON':^{max_py}} | {'CYTHON':^{max_cy}}")
print("-" * (max_py + 3 + max_cy))
for i_block in range(len(py_unrolled)):
for i_line in range(len(py_unrolled[i_block])):
print(f"{py_unrolled[i_block][i_line]:{max_py}} | {cy_unrolled[i_block][i_line]:{max_cy}}")
print("-" * (max_py + 3 + max_cy))
if __name__ == '__main__':
os.chdir("..")
for name, f_py, f_cy in files:
print(f"\n\n\n==> ANALYZING: {name}\n")
# Load in the files as a list, split on the new-line symbol
with open(f_py, 'r') as f:
contents_py = f.read().split('\n')
with open(f_cy, 'r') as f:
contents_cy = f.read().split('\n')
# Match the two files with each other
diff_py, diff_cy = match(contents_py, contents_cy)
# Pretty print the difference of the two files
pretty_print(diff_py, diff_cy)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import subprocess
from uuid import uuid4
from urllib.parse import urlparse, urlunparse
import airflow
from airflow.models import Connection
from airflow.utils.db import provide_session
from openlineage.airflow.facets import AirflowVersionRunFacet, AirflowRunArgsRunFacet
try:
# Import from pendulum 1.x version
from pendulum import Pendulum, from_timestamp
except ImportError:
# Import for Pendulum 2.x version
from pendulum import DateTime as Pendulum, from_timestamp
log = logging.getLogger(__name__)
_NOMINAL_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
class JobIdMapping:
# job_name here is OL job name - aka combination of dag_id and task_id
@staticmethod
def set(job_name: str, dag_run_id: str, task_run_id: str):
airflow.models.Variable.set(
JobIdMapping.make_key(job_name, dag_run_id),
json.dumps(task_run_id)
)
@staticmethod
def pop(job_name, dag_run_id, session):
return JobIdMapping.get(job_name, dag_run_id, session, delete=True)
@staticmethod
def get(job_name, dag_run_id, session, delete=False):
key = JobIdMapping.make_key(job_name, dag_run_id)
if session:
q = session.query(airflow.models.Variable).filter(
airflow.models.Variable.key == key)
if not q.first():
return None
else:
val = q.first().val
if delete:
q.delete(synchronize_session=False)
if val:
return json.loads(val)
return None
@staticmethod
def make_key(job_name, run_id):
return "openlineage_id_mapping-{}-{}".format(job_name, run_id)
def url_to_https(url) -> str:
# Ensure URL exists
if not url:
return None
base_url = None
if url.startswith('git@'):
part = url.split('git@')[1:2]
if part:
base_url = f'https://{part[0].replace(":", "/", 1)}'
elif url.startswith('https://'):
base_url = url
if not base_url:
raise ValueError(f"Unable to extract location from: {url}")
if base_url.endswith('.git'):
base_url = base_url[:-4]
return base_url
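# e.g. 'git@github.com:OpenLineage/OpenLineage.git'
# -> 'https://github.com/OpenLineage/OpenLineage'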
def get_location(file_path) -> str:
# Ensure file path exists
if not file_path:
return None
# move to the file directory
abs_path = os.path.abspath(file_path)
file_name = os.path.basename(file_path)
cwd = os.path.dirname(abs_path)
# get the repo url
repo_url = execute_git(cwd, ['config', '--get', 'remote.origin.url'])
# get the repo relative path
repo_relative_path = execute_git(cwd, ['rev-parse', '--show-prefix'])
# get the commitId for the particular file
commit_id = execute_git(cwd, ['rev-list', 'HEAD', '-1', '--', file_name])
# build the URL
base_url = url_to_https(repo_url)
if not base_url:
return None
return f'{base_url}/blob/{commit_id}/{repo_relative_path}{file_name}'
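# e.g. a tracked DAG file might resolve to (hypothetical repo and commit):
# 'https://github.com/my-org/my-repo/blob/<commit_id>/dags/my_dag.py'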
def execute_git(cwd, params):
p = subprocess.Popen(['git'] + params,
cwd=cwd, stdout=subprocess.PIPE, stderr=None)
p.wait(timeout=0.5)
out, err = p.communicate()
return out.decode('utf8').strip()
def get_connection_uri(conn: Connection):
"""
Return the connection URI for the given connection, with the username
and password removed from the netloc.
"""
conn_uri = conn.get_uri()
parsed = urlparse(conn_uri)
# Remove username and password
parsed = parsed._replace(netloc=f'{parsed.hostname}:{parsed.port}')
return urlunparse(parsed)
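# e.g. 'postgres://user:secret@localhost:5432/mydb' (illustrative URI)
# -> 'postgres://localhost:5432/mydb'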
def get_normalized_postgres_connection_uri(conn: Connection):
"""
URIs starting with postgresql:// and postgres:// are both valid
PostgreSQL connection strings. This function normalizes it to
postgres:// as canonical name according to OpenLineage spec.
"""
uri = get_connection_uri(conn)
if uri.startswith('postgresql'):
uri = uri.replace('postgresql', 'postgres', 1)
return uri
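# e.g. 'postgresql://localhost:5432/mydb' -> 'postgres://localhost:5432/mydb'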
@provide_session
def get_connection(conn_id, session=None) -> Connection:
# TODO: We may want to throw an exception if the connection
# does not exist (ex: AirflowConnectionException). The connection
# URI is required when collecting metadata for a data source.
conn_uri = os.environ.get('AIRFLOW_CONN_' + conn_id.upper())
if conn_uri:
conn = Connection()
conn.parse_from_uri(conn_uri)
return conn
return (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
def get_job_name(task):
return f'{task.dag_id}.{task.task_id}'
def get_custom_facets(task, is_external_trigger: bool):
return {
"airflow_runArgs": AirflowRunArgsRunFacet(is_external_trigger),
"airflow_version": AirflowVersionRunFacet.from_task(task)
}
def new_lineage_run_id(dag_run_id: str, task_id: str) -> str:
return str(uuid4())
class DagUtils:
def get_execution_date(**kwargs):
return kwargs.get('execution_date')
@staticmethod
def get_start_time(execution_date=None):
if execution_date:
return DagUtils.to_iso_8601(execution_date)
else:
return None
@staticmethod
def get_end_time(execution_date, default):
if execution_date:
end_time = default
else:
end_time = None
if end_time:
end_time = DagUtils.to_iso_8601(end_time)
return end_time
@staticmethod
def to_iso_8601(dt):
if not dt:
return None
if isinstance(dt, int):
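# integer timestamps are interpreted as epoch milliseconds, hence the /1000.0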
dt = from_timestamp(dt/1000.0)
if isinstance(dt, Pendulum):
return dt.format(_NOMINAL_TIME_FORMAT)
else:
return dt.strftime(_NOMINAL_TIME_FORMAT)
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import subprocess
from uuid import uuid4
from urllib.parse import urlparse, urlunparse
import airflow
from airflow.models import Connection
from airflow.utils.db import provide_session
from openlineage.airflow.facets import AirflowVersionRunFacet, AirflowRunArgsRunFacet
try:
# Import from pendulum 1.x version
from pendulum import Pendulum, from_timestamp
except ImportError:
# Import for Pendulum 2.x version
from pendulum import DateTime as Pendulum, from_timestamp
log = logging.getLogger(__name__)
_NOMINAL_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
class JobIdMapping:
# job_name here is OL job name - aka combination of dag_id and task_id
@staticmethod
def set(job_name: str, dag_run_id: str, task_run_id: str):
airflow.models.Variable.set(
JobIdMapping.make_key(job_name, dag_run_id),
json.dumps(task_run_id)
)
@staticmethod
def pop(job_name, dag_run_id, session):
return JobIdMapping.get(job_name, dag_run_id, session, delete=True)
@staticmethod
def get(job_name, dag_run_id, session, delete=False):
key = JobIdMapping.make_key(job_name, dag_run_id)
if session:
q = session.query(airflow.models.Variable).filter(
airflow.models.Variable.key == key)
if not q.first():
return None
else:
val = q.first().val
if delete:
q.delete(synchronize_session=False)
if val:
return json.loads(val)
return None
@staticmethod
def make_key(job_name, run_id):
return "openlineage_id_mapping-{}-{}".format(job_name, run_id)
def url_to_https(url) -> str:
# Ensure URL exists
if not url:
return None
base_url = None
if url.startswith('git@'):
part = url.split('git@')[1:2]
if part:
base_url = f'https://{part[0].replace(":", "/", 1)}'
elif url.startswith('https://'):
base_url = url
if not base_url:
raise ValueError(f"Unable to extract location from: {url}")
if base_url.endswith('.git'):
base_url = base_url[:-4]
return base_url
def get_location(file_path) -> str:
# Ensure file path exists
if not file_path:
return None
# move to the file directory
abs_path = os.path.abspath(file_path)
file_name = os.path.basename(file_path)
cwd = os.path.dirname(abs_path)
# get the repo url
repo_url = execute_git(cwd, ['config', '--get', 'remote.origin.url'])
# get the repo relative path
repo_relative_path = execute_git(cwd, ['rev-parse', '--show-prefix'])
# get the commitId for the particular file
commit_id = execute_git(cwd, ['rev-list', 'HEAD', '-1', '--', file_name])
# build the URL
base_url = url_to_https(repo_url)
if not base_url:
return None
return f'{base_url}/blob/{commit_id}/{repo_relative_path}{file_name}'
def execute_git(cwd, params):
p = subprocess.Popen(['git'] + params,
cwd=cwd, stdout=subprocess.PIPE, stderr=None)
p.wait(timeout=0.5)
out, err = p.communicate()
return out.decode('utf8').strip()
def get_connection_uri(conn: Connection):
"""
Return the connection URI for the given connection, with the username
and password removed from the netloc.
"""
conn_uri = conn.get_uri()
parsed = urlparse(conn_uri)
# Remove username and password
parsed = parsed._replace(netloc=f'{parsed.hostname}:{parsed.port}')
return urlunparse(parsed)
def get_normalized_postgres_connection_uri(conn: Connection):
"""
URIs starting with postgresql:// and postgres:// are both valid
PostgreSQL connection strings. This function normalizes it to
postgres:// as canonical name according to OpenLineage spec.
"""
uri = get_connection_uri(conn)
if uri.startswith('postgresql'):
uri = uri.replace('postgresql', 'postgres', 1)
return uri
@provide_session
def get_connection(conn_id, session=None) -> Connection:
# TODO: We may want to throw an exception if the connection
# does not exist (ex: AirflowConnectionException). The connection
# URI is required when collecting metadata for a data source.
conn_uri = os.environ.get('AIRFLOW_CONN_' + conn_id.upper())
if conn_uri:
conn = Connection()
conn.parse_from_uri(conn_uri)
return conn
return (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
def get_job_name(task):
return f'{task.dag_id}.{task.task_id}'
def get_custom_facets(task, is_external_trigger: bool):
return {
"airflow_runArgs": AirflowRunArgsRunFacet(is_external_trigger),
"airflow_version": AirflowVersionRunFacet.from_task(task)
}
def new_lineage_run_id(dag_run_id: str, task_id: str) -> str:
return str(uuid4())
class DagUtils:
def get_execution_date(**kwargs):
return kwargs.get('execution_date')
@staticmethod
def get_start_time(execution_date=None):
if execution_date:
return DagUtils.to_iso_8601(execution_date)
else:
return None
@staticmethod
def get_end_time(execution_date, default):
if execution_date:
end_time = default
else:
end_time = None
if end_time:
end_time = DagUtils.to_iso_8601(end_time)
return end_time
@staticmethod
def to_iso_8601(dt):
if not dt:
return None
if isinstance(dt, int):
dt = from_timestamp(dt/1000.0)
if isinstance(dt, Pendulum):
return dt.format(_NOMINAL_TIME_FORMAT)
else:
return dt.strftime(_NOMINAL_TIME_FORMAT)
|
import typing
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from json import dumps
from warnings import warn
from .models import Node, APIRequest, APIResponse
__all__ = (
"API",
"HTTP_METHODS",
"ALLOWED_LIBS",
)
HTTP_METHODS = [
"GET",
"HEAD",
"POST",
"PUT",
"DELETE",
"CONNECT",
"OPTIONS",
"TRACE",
"PATCH",
]
ALLOWED_LIBS = {
"AlbertUnruhUtils": "https://github.com/AlbertUnruh/AlbertUnruhUtils.py",
}
def _default_endpoint(*_):
return APIResponse(404, {"message": "No Path!"})
class API:
_version_pattern = "v{version}"
_version_default = None
_current_version = None
_checks_request_global: dict[str, list[tuple[callable, int]]]
_checks_response_global: dict[str, list[callable]]
_versions: dict[str, Node]
def __init__(
self,
*,
host="127.0.0.1",
port=3333,
name=None,
default=1,
version_pattern="v{version}",
used_libs=None,
):
"""
Parameters
----------
host: str
The host of the server.
port: int
The port of the server.
name: str, optional
The name of the server.
default: int
The default version.
version_pattern: str
The pattern for the versions.
used_libs: list[str], optional
Additional used libraries to adapt the code to them.
"""
self._host = host
self._port = port
self._name = name or "NAA API"
self._checks_request_global = {}
self._checks_response_global = {}
self._versions = {}
self._default_endpoint = _default_endpoint
assert (
"{version}" in version_pattern
), "'{version}' must be present in 'version_pattern'!"
self._version_pattern = version_pattern
self._version_default = self._version_pattern.format(version=default)
if used_libs is None:
used_libs = []
assert all(lib in ALLOWED_LIBS for lib in used_libs), (
f"You can only use supported libraries! You can use one of these: "
f"{", ".join(f"{k} ({ALLOWED_LIBS[k]})' for k in ALLOWED_LIBS)}"
)
if len(used_libs):
if len(used_libs) == 1:
lib = used_libs[0]
warn(RuntimeWarning(f"Used Library {lib} must be used everywhere!"))
else:
libs = ", ".join(used_libs[:-1]) + f" and {used_libs[-1]}"
warn(RuntimeWarning(f"Used Libraries {libs} must be used everywhere!"))
self._used_libs = used_libs
@Request.application
def _application(self, request):
"""
Parameters
----------
request: Request
"""
path = request.path[1:]
version = self._version_default
p = path.split("/")
if p:
for v in self._versions:
if v == p[0]:
version = v
# to get rid of the version in path
path = path[len(v) + 1 :] # noqa: E203
break
del p
request = APIRequest(
method=request.method,
headers=dict(request.headers),
ip=request.remote_addr,
url=path,
version=version,
)
for check, status in self._checks_request_global.get(version):
if not check(request):
return Response(
status=status,
response=dumps({"message": APIResponse.DEFAULT_MESSAGES[status]}),
content_type="application/json",
)
if not path:
result = self._default_endpoint(request)
# format result from
# AlbertUnruhUtils.ratelimit.server.ServerRateLimit.__call__.decorator()
# Notes
# -----
# - decorator is in this case nested and not direct accessible
# - library: https://github.com/AlbertUnruh/AlbertUnruhUtils.py
if "AlbertUnruhUtils" in self._used_libs:
auu, result = result
if not auu[0]:
result = APIResponse(429)
result._response.update(auu[1]) # noqa
else:
path = path.split("/")
result = self._versions[version].find_node(
path=path, request=request
) # type: APIResponse
for check in self._checks_response_global.get(version):
check(result)
status = result.status_code
response = result.response
response.update(message=result.message)
response = dumps(response)
return Response(
status=status, response=response, content_type="application/json"
)
def add_version(self, version, *, fallback: list[callable] = None):
"""
Parameters
----------
version: int
fallback: list[callable]
"""
for fb in fallback or []:
self.add_version(version)(fb)
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
self._current_version = self._version_pattern.format(version=version)
self._checks_request_global[
self._current_version
] = self._checks_request_global.get(self._current_version, [])
self._checks_response_global[
self._current_version
] = self._checks_response_global.get(self._current_version, [])
version_node = self._versions.get(
self._current_version, Node(*HTTP_METHODS, used_libs=self._used_libs)
) # type: Node
node = Node(*HTTP_METHODS, used_libs=self._used_libs)(clb)
node._children.update(version_node._children) # noqa
self._versions[self._current_version] = node
clb(self)
self._current_version = None
return clb
return decorator
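# Typical usage (hypothetical endpoint; assumes APIResponse(status, payload)
# as used by _default_endpoint above):
# @api.add_version(1)
# def version_1(api):
#     @api.add("GET")
#     def ping(request):
#         return APIResponse(200, {"pong": True})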
def add(self, *methods, ignore_invalid_methods=False):
"""
Parameters
----------
methods: str
ignore_invalid_methods: bool
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
The function/method which should be added as a node.
Returns
-------
Node
The new node.
"""
version = self._get_version()
node = Node(
*methods,
ignore_invalid_methods=ignore_invalid_methods,
used_libs=self._used_libs,
)
node(clb)
self._versions[version]._children[clb.__name__] = node # noqa
return node
return decorator
def add_global_request_check(self, default_return_value):
"""
If the check returns False, the `default_return_value` status code
is returned and the request will not be processed.
Parameters
----------
default_return_value: int
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
version = self._get_version()
self._checks_request_global[version].append((clb, default_return_value))
return clb
return decorator
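# e.g. a hypothetical auth check (assumes APIRequest exposes the parsed
# headers and that 401 has an entry in APIResponse.DEFAULT_MESSAGES):
# @api.add_global_request_check(401)
# def has_auth(request):
#     return "Authorization" in request.headers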
def add_global_response_check(self):
"""
Can be used to edit responses before sending them.
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
version = self._get_version()
self._checks_response_global[version].append(clb)
return clb
return decorator
def default_endpoint(
self,
clb: typing.Callable[[APIRequest], APIResponse],
) -> typing.Callable[[APIRequest], APIResponse]:
"""
Adds a default endpoint. It will be used if no path is given.
Parameters
----------
clb: typing.Callable[[APIRequest], APIResponse]
The endpoint.
Returns
-------
typing.Callable[[APIRequest], APIResponse]
"""
self._default_endpoint = clb
return clb
@property
def host(self):
"""
Returns
-------
str
"""
return self._host
@property
def port(self):
"""
Returns
-------
int
"""
return self._port
def run_api(self, *, debug=False, reload=False, processes=1):
"""
Parameters
----------
debug, reload: bool
Whether it should debug/reload.
processes: int
The number of processes which can be used by the server.
"""
if self._versions and (default := self._version_default) is not None:
if default not in self._versions:
raise RuntimeError(
f"Can't have {default!r} as default version, because this version is not set!"
)
run_simple(
self.host,
self.port,
self._application,
use_reloader=reload,
use_debugger=debug,
processes=processes,
)
__call__ = run_api
def _get_version(self):
"""
Returns
-------
str
Raises
------
AssertionError
"""
assert (
version := self._current_version
) is not None, (
"You can only add an endpoint if you are in a version (API.add_version)"
)
return version
| import typing
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from json import dumps
from warnings import warn
from .models import Node, APIRequest, APIResponse
__all__ = (
"API",
"HTTP_METHODS",
"ALLOWED_LIBS",
)
HTTP_METHODS = [
"GET",
"HEAD",
"POST",
"PUT",
"DELETE",
"CONNECT",
"OPTIONS",
"TRACE",
"PATCH",
]
ALLOWED_LIBS = {
"AlbertUnruhUtils": "https://github.com/AlbertUnruh/AlbertUnruhUtils.py",
}
def _default_endpoint(*_):
return APIResponse(404, {"message": "No Path!"})
class API:
_version_pattern = "v{version}"
_version_default = None
_current_version = None
_checks_request_global: dict[str, list[tuple[callable, int]]]
_checks_response_global: dict[str, list[callable]]
_versions: dict[str, Node]
def __init__(
self,
*,
host="127.0.0.1",
port=3333,
name=None,
default=1,
version_pattern="v{version}",
used_libs=None,
):
"""
Parameters
----------
host: str
The host of the server.
port: int
The port of the server.
name: str, optional
The name of the server.
default: int
The default version.
version_pattern: str
The pattern for the versions.
used_libs: list[str], optional
Additional used libraries to adapt the code to them.
"""
self._host = host
self._port = port
self._name = name or "NAA API"
self._checks_request_global = {}
self._checks_response_global = {}
self._versions = {}
self._default_endpoint = _default_endpoint
assert (
"{version}" in version_pattern
), "'{version}' must be present in 'version_pattern'!"
self._version_pattern = version_pattern
self._version_default = self._version_pattern.format(version=default)
if used_libs is None:
used_libs = []
assert all(lib in ALLOWED_LIBS for lib in used_libs), (
f"You can only use supported libraries! You can use one of these: "
f"{', '.join(f'{k} ({ALLOWED_LIBS[k]})' for k in ALLOWED_LIBS)}"
)
if len(used_libs):
if len(used_libs) == 1:
lib = used_libs[0]
warn(RuntimeWarning(f"Used Library {lib} must be used everywhere!"))
else:
libs = ", ".join(used_libs[:-1]) + f" and {used_libs[-1]}"
warn(RuntimeWarning(f"Used Libraries {libs} must be used everywhere!"))
self._used_libs = used_libs
@Request.application
def _application(self, request):
"""
Parameters
----------
request: Request
"""
path = request.path[1:]
version = self._version_default
p = path.split("/")
if p:
for v in self._versions:
if v == p[0]:
version = v
# to get rid of the version in path
path = path[len(v) + 1 :] # noqa: E203
break
del p
request = APIRequest(
method=request.method,
headers=dict(request.headers),
ip=request.remote_addr,
url=path,
version=version,
)
for check, status in self._checks_request_global.get(version):
if not check(request):
return Response(
status=status,
response=dumps({"message": APIResponse.DEFAULT_MESSAGES[status]}),
content_type="application/json",
)
if not path:
result = self._default_endpoint(request)
# format result from
# AlbertUnruhUtils.ratelimit.server.ServerRateLimit.__call__.decorator()
# Notes
# -----
# - decorator is in this case nested and not direct accessible
# - library: https://github.com/AlbertUnruh/AlbertUnruhUtils.py
if "AlbertUnruhUtils" in self._used_libs:
auu, result = result
if not auu[0]:
result = APIResponse(429)
result._response.update(auu[1]) # noqa
else:
path = path.split("/")
result = self._versions[version].find_node(
path=path, request=request
) # type: APIResponse
for check in self._checks_response_global.get(version):
check(result)
status = result.status_code
response = result.response
response.update(message=result.message)
response = dumps(response)
return Response(
status=status, response=response, content_type="application/json"
)
def add_version(self, version, *, fallback: list[callable] = None):
"""
Parameters
----------
version: int
fallback: list[callable]
"""
for fb in fallback or []:
self.add_version(version)(fb)
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
self._current_version = self._version_pattern.format(version=version)
self._checks_request_global[
self._current_version
] = self._checks_request_global.get(self._current_version, [])
self._checks_response_global[
self._current_version
] = self._checks_response_global.get(self._current_version, [])
version_node = self._versions.get(
self._current_version, Node(*HTTP_METHODS, used_libs=self._used_libs)
) # type: Node
node = Node(*HTTP_METHODS, used_libs=self._used_libs)(clb)
node._children.update(version_node._children) # noqa
self._versions[self._current_version] = node
clb(self)
self._current_version = None
return clb
return decorator
def add(self, *methods, ignore_invalid_methods=False):
"""
Parameters
----------
methods: str
ignore_invalid_methods: bool
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
The function/method which should be added as a node.
Returns
-------
Node
The new node.
"""
version = self._get_version()
node = Node(
*methods,
ignore_invalid_methods=ignore_invalid_methods,
used_libs=self._used_libs,
)
node(clb)
self._versions[version]._children[clb.__name__] = node # noqa
return node
return decorator
def add_global_request_check(self, default_return_value):
"""
If the check returns False, the `default_return_value` status code
is returned and the request will not be processed.
Parameters
----------
default_return_value: int
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
version = self._get_version()
self._checks_request_global[version].append((clb, default_return_value))
return clb
return decorator
def add_global_response_check(self):
"""
Can be used to edit responses before sending them.
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
version = self._get_version()
self._checks_response_global[version].append(clb)
return clb
return decorator
def default_endpoint(
self,
clb: typing.Callable[[APIRequest], APIResponse],
) -> typing.Callable[[APIRequest], APIResponse]:
"""
Adds a default endpoint. It will be used if no path is given.
Parameters
----------
clb: typing.Callable[[APIRequest], APIResponse]
The endpoint.
Returns
-------
typing.Callable[[APIRequest], APIResponse]
"""
self._default_endpoint = clb
return clb
@property
def host(self):
"""
Returns
-------
str
"""
return self._host
@property
def port(self):
"""
Returns
-------
int
"""
return self._port
def run_api(self, *, debug=False, reload=False, processes=1):
"""
Parameters
----------
debug, reload: bool
Whether it should debug/reload.
processes: int
The number of processes which can be used by the server.
"""
if self._versions and (default := self._version_default) is not None:
if default not in self._versions:
raise RuntimeError(
f"Can't have {default!r} as default version, because this version is not set!"
)
run_simple(
self.host,
self.port,
self._application,
use_reloader=reload,
use_debugger=debug,
processes=processes,
)
__call__ = run_api
def _get_version(self):
"""
Returns
-------
str
Raises
------
AssertionError
"""
assert (
version := self._current_version
) is not None, (
"You can only add an endpoint if you are in a version (API.add_version)"
)
return version
|
import logging
from collections import defaultdict
from enum import Enum
from typing import Dict, List
from requests import Response
from requests.exceptions import RequestException
from dagster import Failure, RetryRequested
from dagster.core.execution.context.compute import SolidExecutionContext
def fmt_rpc_logs(logs: List[Dict[str, str]]) -> Dict[int, str]:
d = defaultdict(list)
for log in logs:
levelname = log["levelname"]
d[getattr(logging, levelname)].append(
f"{log.get("timestamp")} - {levelname} - {log.get("message")}"
)
return {level: "\n".join(logs) for level, logs in d.items()}
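# e.g. [{"levelname": "INFO", "timestamp": "t1", "message": "m1"},
#       {"levelname": "INFO", "timestamp": "t2", "message": "m2"}]
# collapses to {20: "t1 - INFO - m1\nt2 - INFO - m2"} (20 == logging.INFO)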
def log_rpc(context: SolidExecutionContext, logs: List[Dict]) -> None:
if len(logs) > 0:
logs_fmt = fmt_rpc_logs(logs)
for level, logs_str in logs_fmt.items():
context.log.log(level=level, msg=logs_str)
class DBTErrors(Enum):
project_currently_compiling_error = 10010
runtime_error = 10001
server_error = -32000
project_compile_failure_error = 10011
rpc_process_killed_error = 10009
rpc_timeout_error = 10008
def raise_for_rpc_error(context: SolidExecutionContext, resp: Response) -> None:
error = resp.json().get("error")
if error is not None:
if error["code"] in [
DBTErrors.project_currently_compiling_error.value,
DBTErrors.runtime_error.value,
DBTErrors.server_error.value,
]:
context.log.warning(error["message"])
raise RetryRequested(max_retries=5, seconds_to_wait=30)
elif error["code"] == DBTErrors.project_compile_failure_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Error Cause": error["data"]["cause"]["message"],
},
)
elif error["code"] == DBTErrors.rpc_process_killed_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Signum": str(error["data"]["signum"]),
"RPC Error Message": error["data"]["message"],
},
)
elif error["code"] == DBTErrors.rpc_timeout_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Timeout": str(error["data"]["timeout"]),
"RPC Error Message": error["data"]["message"],
},
)
else:
raise Failure(
description=error["message"],
metadata={"RPC Error Code": str(error["code"])},
)
def is_fatal_code(e: RequestException) -> bool:
"""Helper function to determine if a Requests reponse status code
is a "fatal" status code. If it is, we will not request a solid retry."""
return 400 <= e.response.status_code < 500 and e.response.status_code != 429
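# e.g. 404 -> True (client error, no retry); 429 -> False (rate limited, retry);
# 503 -> False (server error, retry)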
| import logging
from collections import defaultdict
from enum import Enum
from typing import Dict, List
from requests import Response
from requests.exceptions import RequestException
from dagster import Failure, RetryRequested
from dagster.core.execution.context.compute import SolidExecutionContext
def fmt_rpc_logs(logs: List[Dict[str, str]]) -> Dict[int, str]:
d = defaultdict(list)
for log in logs:
levelname = log["levelname"]
d[getattr(logging, levelname)].append(
f"{log.get('timestamp')} - {levelname} - {log.get('message')}"
)
return {level: "\n".join(logs) for level, logs in d.items()}
def log_rpc(context: SolidExecutionContext, logs: List[Dict]) -> None:
if len(logs) > 0:
logs_fmt = fmt_rpc_logs(logs)
for level, logs_str in logs_fmt.items():
context.log.log(level=level, msg=logs_str)
class DBTErrors(Enum):
project_currently_compiling_error = 10010
runtime_error = 10001
server_error = -32000
project_compile_failure_error = 10011
rpc_process_killed_error = 10009
rpc_timeout_error = 10008
def raise_for_rpc_error(context: SolidExecutionContext, resp: Response) -> None:
error = resp.json().get("error")
if error is not None:
if error["code"] in [
DBTErrors.project_currently_compiling_error.value,
DBTErrors.runtime_error.value,
DBTErrors.server_error.value,
]:
context.log.warning(error["message"])
raise RetryRequested(max_retries=5, seconds_to_wait=30)
elif error["code"] == DBTErrors.project_compile_failure_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Error Cause": error["data"]["cause"]["message"],
},
)
elif error["code"] == DBTErrors.rpc_process_killed_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Signum": str(error["data"]["signum"]),
"RPC Error Message": error["data"]["message"],
},
)
elif error["code"] == DBTErrors.rpc_timeout_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Timeout": str(error["data"]["timeout"]),
"RPC Error Message": error["data"]["message"],
},
)
else:
raise Failure(
description=error["message"],
metadata={"RPC Error Code": str(error["code"])},
)
def is_fatal_code(e: RequestException) -> bool:
"""Helper function to determine if a Requests reponse status code
is a "fatal" status code. If it is, we will not request a solid retry."""
return 400 <= e.response.status_code < 500 and e.response.status_code != 429
|
import copy
import os
import re
from textwrap import dedent
from typing import Any, Dict, List, Optional, Set, Tuple, cast
from unittest import mock
import orjson
from django.conf import settings
from django.test import override_settings
from markdown import Markdown
from zerver.lib.actions import (
change_user_is_active,
do_add_alert_words,
do_change_user_setting,
do_create_realm,
do_remove_realm_emoji,
do_set_realm_property,
)
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.camo import get_camo_url
from zerver.lib.create_user import create_user
from zerver.lib.emoji import get_emoji_url
from zerver.lib.exceptions import JsonableError, MarkdownRenderingException
from zerver.lib.markdown import (
MarkdownListPreprocessor,
MessageRenderingResult,
clear_state_for_testing,
content_has_emoji_syntax,
fetch_tweet_data,
get_tweet_id,
image_preview_enabled,
markdown_convert,
maybe_update_markdown_engines,
possible_linked_stream_names,
topic_links,
url_embed_preview_enabled,
url_to_a,
)
from zerver.lib.markdown.fenced_code import FencedBlockPreprocessor
from zerver.lib.mdiff import diff_strings
from zerver.lib.mention import (
MentionData,
get_possible_mentions_info,
possible_mentions,
possible_user_group_mentions,
)
from zerver.lib.message import render_markdown
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.tex import render_tex
from zerver.lib.user_groups import create_user_group
from zerver.models import (
Message,
RealmEmoji,
RealmFilter,
Stream,
UserGroup,
UserMessage,
UserProfile,
flush_linkifiers,
flush_per_request_caches,
get_client,
get_realm,
get_stream,
linkifiers_for_realm,
realm_in_local_linkifiers_cache,
)
class SimulatedFencedBlockPreprocessor(FencedBlockPreprocessor):
# Simulate code formatting.
def format_code(self, lang: Optional[str], code: str) -> str:
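# Return "<lang>:<code>" so tests can assert which language the preprocessor
# detected without invoking a real syntax highlighter.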
return (lang or "") + ":" + code
def placeholder(self, s: str) -> str:
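# Wrap the stored text in ** so the placeholder is easy to spot in expected output.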
return "**" + s.strip("\n") + "**"
class FencedBlockPreprocessorTest(ZulipTestCase):
def test_simple_quoting(self) -> None:
processor = FencedBlockPreprocessor(Markdown())
markdown_input = [
"~~~ quote",
"hi",
"bye",
"",
"",
]
expected = [
"",
"> hi",
"> bye",
"> ",
"> ",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
def test_serial_quoting(self) -> None:
processor = FencedBlockPreprocessor(Markdown())
markdown_input = [
"~~~ quote",
"hi",
"~~~",
"",
"~~~ quote",
"bye",
"",
"",
]
expected = [
"",
"> hi",
"",
"",
"",
"> bye",
"> ",
"> ",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
def test_serial_code(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
markdown_input = [
"``` .py",
"hello()",
"```",
"",
"```vb.net",
"goodbye()",
"```",
"",
"```c#",
"weirdchar()",
"```",
"",
"```",
"no-highlight()",
"```",
"",
]
expected = [
"",
"**py:hello()**",
"",
"",
"",
"**vb.net:goodbye()**",
"",
"",
"",
"**c#:weirdchar()**",
"",
"",
"",
"**:no-highlight()**",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
def test_nested_code(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
markdown_input = [
"~~~ quote",
"hi",
"``` .py",
"hello()",
"```",
"",
"",
]
expected = [
"",
"> hi",
"> ",
"> **py:hello()**",
"> ",
"> ",
"> ",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
def markdown_convert_wrapper(content: str) -> str:
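# Render `content` in the default "zulip" test realm and return just the HTML,
# so tests can compare markup strings directly.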
return markdown_convert(
content=content,
message_realm=get_realm("zulip"),
).rendered_content
class MarkdownMiscTest(ZulipTestCase):
def test_diffs_work_as_expected(self) -> None:
str1 = "<p>The quick brown fox jumps over the lazy dog. Animal stories are fun, yeah</p>"
str2 = "<p>The fast fox jumps over the lazy dogs and cats. Animal stories are fun</p>"
expected_diff = "\u001b[34m-\u001b[0m <p>The \u001b[33mquick brown\u001b[0m fox jumps over the lazy dog. Animal stories are fun\u001b[31m, yeah\u001b[0m</p>\n\u001b[34m+\u001b[0m <p>The \u001b[33mfast\u001b[0m fox jumps over the lazy dog\u001b[32ms and cats\u001b[0m. Animal stories are fun</p>\n"
self.assertEqual(diff_strings(str1, str2), expected_diff)
def test_get_possible_mentions_info(self) -> None:
realm = get_realm("zulip")
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password="whatever",
realm=realm,
full_name=full_name,
)
fred1 = make_user("fred1@example.com", "Fred Flintstone")
change_user_is_active(fred1, False)
fred2 = make_user("fred2@example.com", "Fred Flintstone")
fred3 = make_user("fred3@example.com", "Fred Flintstone")
change_user_is_active(fred3, False)
fred4 = make_user("fred4@example.com", "Fred Flintstone")
lst = get_possible_mentions_info(
realm.id, {"Fred Flintstone", "Cordelia, LEAR's daughter", "Not A User"}
)
set_of_names = set(map(lambda x: x["full_name"].lower(), lst))
self.assertEqual(set_of_names, {"fred flintstone", "cordelia, lear's daughter"})
by_id = {row["id"]: row for row in lst}
self.assertEqual(
by_id.get(fred2.id),
dict(
email=fred2.email,
full_name="Fred Flintstone",
id=fred2.id,
),
)
self.assertEqual(
by_id.get(fred4.id),
dict(
email=fred4.email,
full_name="Fred Flintstone",
id=fred4.id,
),
)
def test_mention_data(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
content = "@**King Hamlet** @**Cordelia, lear's daughter**"
mention_data = MentionData(realm.id, content)
self.assertEqual(mention_data.get_user_ids(), {hamlet.id, cordelia.id})
self.assertEqual(
mention_data.get_user_by_id(hamlet.id),
dict(
email=hamlet.email,
full_name=hamlet.full_name,
id=hamlet.id,
),
)
user = mention_data.get_user_by_name("king hamLET")
assert user is not None
self.assertEqual(user["email"], hamlet.email)
self.assertFalse(mention_data.message_has_wildcards())
content = "@**King Hamlet** @**Cordelia, lear's daughter** @**all**"
mention_data = MentionData(realm.id, content)
self.assertTrue(mention_data.message_has_wildcards())
def test_invalid_katex_path(self) -> None:
with self.settings(DEPLOY_ROOT="/nonexistent"):
with self.assertLogs(level="ERROR") as m:
render_tex("random text")
self.assertEqual(m.output, ["ERROR:root:Cannot find KaTeX for latex rendering!"])
class MarkdownListPreprocessorTest(ZulipTestCase):
# We test that the preprocessor inserts blank lines at correct places.
# We use <> to indicate that we need to insert a blank line here.
def split_message(self, msg: str) -> Tuple[List[str], List[str]]:
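# For example, "a\n<>b" yields original == ["a", "b"] (markers stripped) and
# expected == ["a", "", "b"] (each marker becomes an inserted blank line).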
original = msg.replace("<>", "").split("\n")
expected = re.split(r"\n|<>", msg)
return original, expected
def test_basic_list(self) -> None:
preprocessor = MarkdownListPreprocessor()
original, expected = self.split_message("List without a gap\n<>* One\n* Two")
self.assertEqual(preprocessor.run(original), expected)
def test_list_after_quotes(self) -> None:
preprocessor = MarkdownListPreprocessor()
original, expected = self.split_message(
"```quote\nSomething\n```\n\nList without a gap\n<>* One\n* Two"
)
self.assertEqual(preprocessor.run(original), expected)
def test_list_in_code(self) -> None:
preprocessor = MarkdownListPreprocessor()
original, expected = self.split_message("```\nList without a gap\n* One\n* Two\n```")
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_different_fences(self) -> None:
preprocessor = MarkdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
~~~
This is a nested code fence, do not make changes here:
* one
* two
````quote
Quote in code fence. Should not convert:
* one
* two
````
~~~
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_same_fence(self) -> None:
preprocessor = MarkdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
```python
This is a nested code fence, do not make changes here:
* one
* two
```quote
Quote in code fence. Should not convert:
* one
* two
```
```
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
class MarkdownTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
clear_state_for_testing()
def assertEqual(self, first: Any, second: Any, msg: str = "") -> None:
if isinstance(first, str) and isinstance(second, str):
if first != second:
raise AssertionError(
"Actual and expected outputs do not match; showing diff.\n"
+ diff_strings(first, second)
+ msg
)
else:
super().assertEqual(first, second)
def load_markdown_tests(self) -> Tuple[Dict[str, Any], List[List[str]]]:
test_fixtures = {}
with open(
os.path.join(os.path.dirname(__file__), "fixtures/markdown_test_cases.json"), "rb"
) as f:
data = orjson.loads(f.read())
for test in data["regular_tests"]:
test_fixtures[test["name"]] = test
return test_fixtures, data["linkify_tests"]
def test_markdown_no_ignores(self) -> None:
# We do not want any ignored tests to be committed and merged.
format_tests, linkify_tests = self.load_markdown_tests()
for name, test in format_tests.items():
message = f'Test "{name}" shouldn\'t be ignored.'
is_ignored = test.get("ignore", False)
self.assertFalse(is_ignored, message)
def test_markdown_fixtures(self) -> None:
format_tests, linkify_tests = self.load_markdown_tests()
valid_keys = {
"name",
"input",
"expected_output",
"backend_only_rendering",
"marked_expected_output",
"text_content",
"translate_emoticons",
"ignore",
}
for name, test in format_tests.items():
with self.subTest(markdown_test_case=name):
# Check that there aren't any unexpected keys as those are often typos
self.assert_length(set(test.keys()) - valid_keys, 0)
# Ignore tests if specified
if test.get("ignore", False):
continue # nocoverage
if test.get("translate_emoticons", False):
# Create a user profile and render the message as that user.
user_profile = self.example_user("othello")
do_change_user_setting(user_profile, "translate_emoticons", True)
msg = Message(sender=user_profile, sending_client=get_client("test"))
rendering_result = render_markdown(msg, test["input"])
converted = rendering_result.rendered_content
else:
converted = markdown_convert_wrapper(test["input"])
self.assertEqual(converted, test["expected_output"])
def replaced(payload: str, url: str, phrase: str = "") -> str:
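# Build the expected anchor for a linkify test: URLs keep an explicit http(s)
# scheme, addresses containing "@" become mailto: links, and anything else is
# assumed to be a bare domain that gets an http:// prefix.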
if url[:4] == "http":
href = url
elif "@" in url:
href = "mailto:" + url
else:
href = "http://" + url
return payload % (f'<a href="{href}">{url}</a>',)
with mock.patch(
"zerver.lib.url_preview.preview.link_embed_data_from_cache", return_value=None
):
for inline_url, reference, url in linkify_tests:
try:
match = replaced(reference, url, phrase=inline_url)
except TypeError:
match = reference
converted = markdown_convert_wrapper(inline_url)
self.assertEqual(match, converted)
def test_inline_file(self) -> None:
msg = "Check out this file file:///Volumes/myserver/Users/Shared/pi.py"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Check out this file <a href="file:///Volumes/myserver/Users/Shared/pi.py">file:///Volumes/myserver/Users/Shared/pi.py</a></p>',
)
clear_state_for_testing()
with self.settings(ENABLE_FILE_LINKS=False):
realm = do_create_realm(string_id="file_links_test", name="file_links_test")
maybe_update_markdown_engines(realm.id, False)
self.assertEqual(
markdown_convert(msg, message_realm=realm).rendered_content,
"<p>Check out this file file:///Volumes/myserver/Users/Shared/pi.py</p>",
)
def test_inline_bitcoin(self) -> None:
msg = "To bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa or not to bitcoin"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>To <a href="bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa">bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa</a> or not to bitcoin</p>',
)
def test_inline_youtube(self) -> None:
msg = "Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
)
msg = "http://www.youtube.com/watch?v=hx1mjT73xYE"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
)
msg = "https://youtu.be/hx1mjT73xYE"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://youtu.be/hx1mjT73xYE">https://youtu.be/hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="https://youtu.be/hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
)
msg = "https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
not_converted = markdown_convert_wrapper(msg)
self.assertEqual(
not_converted,
'<p><a href="https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>',
)
msg = (
"https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>\n<div class="youtube-video message_inline_image"><a data-id="O5nskjZ_GoI" href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"><img src="{get_camo_url("https://i.ytimg.com/vi/O5nskjZ_GoI/default.jpg")}"></a></div>""",
)
msg = "http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw">http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw</a></p>\n<div class="youtube-video message_inline_image"><a data-id="nOJgD4fcZhI" href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"><img src="{get_camo_url("https://i.ytimg.com/vi/nOJgD4fcZhI/default.jpg")}"></a></div>""",
)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_inline_vimeo(self) -> None:
msg = "Check out the debate: https://vimeo.com/246979354"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Check out the debate: <a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
)
msg = "https://vimeo.com/246979354"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_thumbnail_url(self) -> None:
realm = get_realm("zephyr")
msg = "[foobar](/user_uploads/{realm_id}/50/w2G6ok9kr8AMCQCTNAUOFMln/IMG_0677.JPG)"
msg = msg.format(realm_id=realm.id)
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=thumbnail"><'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "https://www.google.com/images/srpr/logo4w.png"
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "www.google.com/images/srpr/logo4w.png"
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "https://www.google.com/images/srpr/logo4w.png"
thumbnail_img = f"""<div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img src="{get_camo_url("https://www.google.com/images/srpr/logo4w.png")}"></a></div>"""
with self.settings(THUMBNAIL_IMAGES=False):
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
# Any URL which is not an external link and doesn't start with
# /user_uploads/ is not thumbnailed
msg = "[foobar](/static/images/cute/turtle.png)"
thumbnail_img = '<div class="message_inline_image"><a href="/static/images/cute/turtle.png" title="foobar"><img src="/static/images/cute/turtle.png"></a></div>'
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "[foobar](/user_avatars/{realm_id}/emoji/images/50.png)"
msg = msg.format(realm_id=realm.id)
thumbnail_img = '<div class="message_inline_image"><a href="/user_avatars/{realm_id}/emoji/images/50.png" title="foobar"><img src="/user_avatars/{realm_id}/emoji/images/50.png"></a></div>'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview(self) -> None:
with_preview = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
without_preview = '<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>'
content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, with_preview)
realm = msg.get_realm()
setattr(realm, "inline_image_preview", False)
realm.save()
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, without_preview)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_external_image_preview_use_camo(self) -> None:
content = "https://example.com/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{get_camo_url(content)}"></a></div>"""
converted = markdown_convert_wrapper(content)
self.assertIn(converted, thumbnail_img)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_static_image_preview_skip_camo(self) -> None:
content = f"{ settings.STATIC_URL }/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{content}"></a></div>"""
converted = markdown_convert_wrapper(content)
self.assertIn(converted, thumbnail_img)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_realm_image_preview_skip_camo(self) -> None:
content = f"https://zulip.{ settings.EXTERNAL_HOST }/thing.jpeg"
converted = markdown_convert_wrapper(content)
self.assertNotIn(converted, get_camo_url(content))
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_cross_realm_image_preview_use_camo(self) -> None:
content = f"https://otherrealm.{ settings.EXTERNAL_HOST }/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{ content }"><img src="{ get_camo_url(content) }"></a></div>"""
converted = markdown_convert_wrapper(content)
self.assertIn(converted, thumbnail_img)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_quoted_blocks(self) -> None:
content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"
expected = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = ">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
expected = '<blockquote>\n<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = ">* http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
expected = '<blockquote>\n<ul>\n<li><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></li>\n</ul>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview_order(self) -> None:
realm = get_realm("zulip")
content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"
expected = '<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg</a></p>\n<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\n\n>http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\n\n* http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg\n* https://www.google.com/images/srpr/logo4w.png"
expected = '<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><blockquote>\n<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a></p>\n</blockquote>\n<ul>\n<li><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div></li>\n<li><div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail"></a></div></li>\n</ul>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = "Test 1\n[21136101110_1dde1c1a7e_o.jpg](/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg) \n\nNext image\n[IMG_20161116_023910.jpg](/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg) \n\nAnother screenshot\n[Screenshot-from-2016-06-01-16-22-42.png](/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png)"
content = content.format(realm_id=realm.id)
expected = '<p>Test 1<br>\n<a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg">21136101110_1dde1c1a7e_o.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg" title="21136101110_1dde1c1a7e_o.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=thumbnail"></a></div><p>Next image<br>\n<a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg">IMG_20161116_023910.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg" title="IMG_20161116_023910.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=thumbnail"></a></div><p>Another screenshot<br>\n<a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png">Screenshot-from-2016-06-01-16-22-42.png</a></p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png" title="Screenshot-from-2016-06-01-16-22-42.png"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=thumbnail"></a></div>'
expected = expected.format(realm_id=realm.id)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_corrected_image_source(self) -> None:
# testing only Wikipedia because linx.li URLs can be expected to expire
content = "https://en.wikipedia.org/wiki/File:Wright_of_Derby,_The_Orrery.jpg"
expected = '<div class="message_inline_image"><a href="https://en.wikipedia.org/wiki/Special:FilePath/File:Wright_of_Derby,_The_Orrery.jpg"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=full" src="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=False)
def test_image_preview_enabled(self) -> None:
ret = image_preview_enabled()
self.assertFalse(ret)
settings.INLINE_IMAGE_PREVIEW = True
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = message.get_realm()
ret = image_preview_enabled()
self.assertTrue(ret)
ret = image_preview_enabled(no_previews=True)
self.assertFalse(ret)
ret = image_preview_enabled(message, realm)
self.assertTrue(ret)
ret = image_preview_enabled(message)
self.assertTrue(ret)
ret = image_preview_enabled(message, realm, no_previews=True)
self.assertFalse(ret)
ret = image_preview_enabled(message, no_previews=True)
self.assertFalse(ret)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_url_embed_preview_enabled(self) -> None:
sender_user_profile = self.example_user("othello")
message = copy.deepcopy(
Message(sender=sender_user_profile, sending_client=get_client("test"))
)
realm = message.get_realm()
realm.inline_url_embed_preview = True # off by default
realm.save(update_fields=["inline_url_embed_preview"])
ret = url_embed_preview_enabled()
self.assertFalse(ret)
settings.INLINE_URL_EMBED_PREVIEW = True
ret = url_embed_preview_enabled()
self.assertTrue(ret)
ret = image_preview_enabled(no_previews=True)
self.assertFalse(ret)
ret = url_embed_preview_enabled(message, realm)
self.assertTrue(ret)
ret = url_embed_preview_enabled(message)
self.assertTrue(ret)
ret = url_embed_preview_enabled(message, no_previews=True)
self.assertFalse(ret)
def test_inline_dropbox(self) -> None:
msg = "Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG"
image_info = {
"image": "https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG",
"desc": "Shared with Dropbox",
"title": "IMG_0923.JPG",
}
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" title="IMG_0923.JPG"><img src="{get_camo_url("https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?raw=1")}"></a></div>""",
)
msg = "Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl="
image_info = {
"image": "https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png",
"desc": "Shared with Dropbox",
"title": "Saves",
}
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" title="Saves"><img src="{get_camo_url("https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png")}"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>""",
)
def test_inline_dropbox_preview(self) -> None:
# Test photo album previews
msg = "https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5"
image_info = {
"image": "https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0",
"desc": "Shared with Dropbox",
"title": "1 photo",
}
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" title="1 photo"><img src="{get_camo_url("https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0")}"></a></div>""",
)
def test_inline_dropbox_negative(self) -> None:
# Make sure we're not overzealous in our conversion:
msg = "Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png"
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=thumbnail"></a></div>',
)
def test_inline_dropbox_bad(self) -> None:
# Don't fail on bad dropbox links
msg = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>',
)
def test_inline_github_preview(self) -> None:
# Test GitHub image previews
msg = "Test: https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Test: <a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png">https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png</a></p>\n<div class="message_inline_image"><a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=full" src="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=thumbnail"></a></div>',
)
msg = "Test: https://developer.github.com/assets/images/hero-circuit-bg.png"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Test: <a href="https://developer.github.com/assets/images/hero-circuit-bg.png">https://developer.github.com/assets/images/hero-circuit-bg.png</a></p>\n<div class="message_inline_image"><a href="https://developer.github.com/assets/images/hero-circuit-bg.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=full" src="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=thumbnail"></a></div>',
)
def test_inline_youtube_preview(self) -> None:
# Test YouTube URLs in spoilers
msg = """\n```spoiler Check out this PyCon video\nhttps://www.youtube.com/watch?v=0c46YHS3RY8\n```"""
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<div class="spoiler-block"><div class="spoiler-header">\n<p>Check out this PyCon video</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div></div></div>""",
)
# Test YouTube URLs in normal messages.
msg = "[YouTube link](https://www.youtube.com/watch?v=0c46YHS3RY8)"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">YouTube link</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div>""",
)
msg = "https://www.youtube.com/watch?v=0c46YHS3RY8\n\nSample text\n\nhttps://www.youtube.com/watch?v=lXFO2ULktEI"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div><p>Sample text</p>\n<p><a href="https://www.youtube.com/watch?v=lXFO2ULktEI">https://www.youtube.com/watch?v=lXFO2ULktEI</a></p>\n<div class="youtube-video message_inline_image"><a data-id="lXFO2ULktEI" href="https://www.youtube.com/watch?v=lXFO2ULktEI"><img src="{get_camo_url("https://i.ytimg.com/vi/lXFO2ULktEI/default.jpg")}"></a></div>""",
)
def test_twitter_id_extraction(self) -> None:
self.assertEqual(
get_tweet_id("http://twitter.com/#!/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/statuses/409030735191097344"),
"409030735191097344",
)
self.assertEqual(get_tweet_id("https://twitter.com/wdaher/status/1017581858"), "1017581858")
self.assertEqual(
get_tweet_id("https://twitter.com/wdaher/status/1017581858/"), "1017581858"
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/photo/1"),
"410766290349879296",
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/"),
"410766290349879296",
)
def test_inline_interesting_links(self) -> None:
def make_link(url: str) -> str:
return f'<a href="{url}">{url}</a>'
normal_tweet_html = (
'<a href="https://twitter.com/Twitter"'
">@Twitter</a> "
"meets @seepicturely at #tcdisrupt cc."
'<a href="https://twitter.com/boscomonkey"'
">@boscomonkey</a> "
'<a href="https://twitter.com/episod"'
">@episod</a> "
'<a href="http://t.co/6J2EgYM"'
">http://instagr.am/p/MuW67/</a>"
)
mention_in_link_tweet_html = """<a href="http://t.co/@foo">http://foo.com</a>"""
media_tweet_html = (
'<a href="http://t.co/xo7pAhK6n3">'
"http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>"
)
emoji_in_tweet_html = """Zulip is <span aria-label=\"100\" class="emoji emoji-1f4af" role=\"img\" title="100">:100:</span>% open-source!"""
def make_inline_twitter_preview(url: str, tweet_html: str, image_html: str = "") -> str:
## As of right now, all previews are mocked to be the exact same tweet
return (
'<div class="inline-preview-twitter">'
'<div class="twitter-tweet">'
f'<a href="{url}">'
'<img class="twitter-avatar"'
' src="https://external-content.zulipcdn.net/external_content/1f7cd2436976d410eab8189ebceda87ae0b34ead/687474703a2f2f7062732e7477696d672e63'
"6f6d2f70726f66696c655f696d616765732f313338303931323137332f53637265656e5f73686f745f323031312d30362d30335f61745f372e33352e33"
'365f504d5f6e6f726d616c2e706e67">'
"</a>"
f"<p>{tweet_html}</p>"
"<span>- Eoin McMillan (@imeoin)</span>"
f"{image_html}"
"</div>"
"</div>"
)
msg = "http://www.twitter.com"
converted = markdown_convert_wrapper(msg)
self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com")))
msg = "http://www.twitter.com/wdaher/"
converted = markdown_convert_wrapper(msg)
self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/")))
msg = "http://www.twitter.com/wdaher/status/3"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/status/3"))
)
# id too long
msg = "http://www.twitter.com/wdaher/status/2879779692873154569"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>".format(
make_link("http://www.twitter.com/wdaher/status/2879779692873154569")
),
)
# id too large (i.e. tweet doesn't exist)
msg = "http://www.twitter.com/wdaher/status/999999999999999999"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>".format(
make_link("http://www.twitter.com/wdaher/status/999999999999999999")
),
)
msg = "http://www.twitter.com/wdaher/status/287977969287315456"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://www.twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
msg = "https://www.twitter.com/wdaher/status/287977969287315456"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("https://www.twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"https://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
msg = "http://twitter.com/wdaher/status/287977969287315456"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
# Repeated links will only be converted once
msg = (
"http://twitter.com/wdaher/status/287977969287315456 "
"http://twitter.com/wdaher/status/287977969287315457 "
"http://twitter.com/wdaher/status/287977969287315457 "
"http://twitter.com/wdaher/status/287977969287315457"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{} {} {} {}</p>\n{}{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
),
),
)
# A max of 3 will be converted
msg = (
"http://twitter.com/wdaher/status/287977969287315456 "
"http://twitter.com/wdaher/status/287977969287315457 "
"https://twitter.com/wdaher/status/287977969287315456 "
"http://twitter.com/wdaher/status/287977969287315460"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{} {} {} {}</p>\n{}{}{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_link("https://twitter.com/wdaher/status/287977969287315456"),
make_link("http://twitter.com/wdaher/status/287977969287315460"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
),
make_inline_twitter_preview(
"https://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
# Test smart in-place inlining behavior:
msg = (
"Paragraph 1: http://twitter.com/wdaher/status/287977969287315456\n\n"
"Paragraph 2\n\n"
"Paragraph 3: http://twitter.com/wdaher/status/287977969287315457"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>Paragraph 1: {}</p>\n{}<p>Paragraph 2</p>\n<p>Paragraph 3: {}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
),
),
)
# Tweet has a mention in a URL; only the URL is linked
msg = "http://twitter.com/wdaher/status/287977969287315458"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315458"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315458",
mention_in_link_tweet_html,
),
),
)
# Tweet with an image
msg = "http://twitter.com/wdaher/status/287977969287315459"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315459"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315459",
media_tweet_html,
(
'<div class="twitter-image">'
'<a href="http://t.co/xo7pAhK6n3">'
f"""<img src="{get_camo_url("https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small")}">"""
"</a>"
"</div>"
),
),
),
)
msg = "http://twitter.com/wdaher/status/287977969287315460"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315460"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315460", emoji_in_tweet_html
),
),
)
# Test Twitter previews in spoiler tags.
msg = "```spoiler secret tweet\nTweet: http://twitter.com/wdaher/status/287977969287315456\n```"
converted = markdown_convert_wrapper(msg)
rendered_spoiler = '<div class="spoiler-block"><div class="spoiler-header">\n<p>secret tweet</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p>Tweet: {}</p>\n{}</div></div>'
self.assertEqual(
converted,
rendered_spoiler.format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
def test_fetch_tweet_data_settings_validation(self) -> None:
with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
self.assertIs(None, fetch_tweet_data("287977969287315459"))
def test_content_has_emoji(self) -> None:
self.assertFalse(content_has_emoji_syntax("boring"))
self.assertFalse(content_has_emoji_syntax("hello: world"))
self.assertFalse(content_has_emoji_syntax(":foobar"))
self.assertFalse(content_has_emoji_syntax("::: hello :::"))
self.assertTrue(content_has_emoji_syntax("foo :whatever:"))
self.assertTrue(content_has_emoji_syntax("\n:whatever:"))
self.assertTrue(content_has_emoji_syntax(":smile: ::::::"))
def test_realm_emoji(self) -> None:
def emoji_img(name: str, file_name: str, realm_id: int) -> str:
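# Mirror the markup the emoji inline pattern produces: the title attribute is
# the emoji name with the surrounding colons stripped and underscores replaced
# by spaces.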
return '<img alt="{}" class="emoji" src="{}" title="{}">'.format(
name, get_emoji_url(file_name, realm_id), name[1:-1].replace("_", " ")
)
realm = get_realm("zulip")
# Needs to mock an actual message because that's how Markdown obtains the realm
msg = Message(sender=self.example_user("hamlet"))
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
realm_emoji = RealmEmoji.objects.filter(
realm=realm, name="green_tick", deactivated=False
).get()
self.assertEqual(
converted.rendered_content,
"<p>{}</p>".format(emoji_img(":green_tick:", realm_emoji.file_name, realm.id)),
)
# Deactivate realm emoji.
do_remove_realm_emoji(realm, "green_tick")
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
def test_deactivated_realm_emoji(self) -> None:
# Deactivate realm emoji.
realm = get_realm("zulip")
do_remove_realm_emoji(realm, "green_tick")
msg = Message(sender=self.example_user("hamlet"))
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
def test_unicode_emoji(self) -> None:
msg = "\u2615" # ☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
)
msg = "\u2615\u2615" # ☕☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
)
def test_no_translate_emoticons_if_off(self) -> None:
user_profile = self.example_user("othello")
do_change_user_setting(user_profile, "translate_emoticons", False)
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = ":)"
expected = "<p>:)</p>"
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
def test_same_markup(self) -> None:
msg = "\u2615" # ☕
unicode_converted = markdown_convert_wrapper(msg)
msg = ":coffee:" # ☕☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(converted, unicode_converted)
def test_links_in_topic_name(self) -> None:
realm = get_realm("zulip")
msg = Message(sender=self.example_user("othello"))
msg.set_topic_name("https://google.com/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[{"url": "https://google.com/hello-world", "text": "https://google.com/hello-world"}],
)
msg.set_topic_name("http://google.com/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[{"url": "http://google.com/hello-world", "text": "http://google.com/hello-world"}],
)
msg.set_topic_name("Without scheme google.com/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[{"url": "https://google.com/hello-world", "text": "google.com/hello-world"}],
)
msg.set_topic_name("Without scheme random.words/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, [])
msg.set_topic_name(
"Try out http://ftp.debian.org, https://google.com/ and https://google.in/."
)
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[
{"url": "http://ftp.debian.org", "text": "http://ftp.debian.org"},
{"url": "https://google.com/", "text": "https://google.com/"},
{"url": "https://google.in/", "text": "https://google.in/"},
],
)
# test order for links without scheme
msg.set_topic_name("google.in google.com")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[
{"url": "https://google.in", "text": "google.in"},
{"url": "https://google.com", "text": "google.com"},
],
)
def test_realm_patterns(self) -> None:
realm = get_realm("zulip")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
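# With this linkifier, a token like "#224" matches the pattern and renders as
# a link to https://trac.example.com/ticket/224 via the %(id)s substitution.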
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
msg = Message(sender=self.example_user("othello"))
msg.set_topic_name("#444")
flush_per_request_caches()
content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
converted = markdown_convert(content, message_realm=realm, message=msg)
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted.rendered_content,
'<p>We should fix <a href="https://trac.example.com/ticket/224">#224</a> and <a href="https://trac.example.com/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.example.com/ticket/16">trac #15</a> today.</p>',
)
self.assertEqual(
converted_topic, [{"url": "https://trac.example.com/ticket/444", "text": "#444"}]
)
msg.set_topic_name("#444 https://google.com")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/ticket/444", "text": "#444"},
{"url": "https://google.com", "text": "https://google.com"},
],
)
RealmFilter(
realm=realm,
pattern=r"#(?P<id>[a-zA-Z]+-[0-9]+)",
url_format_string=r"https://trac.example.com/ticket/%(id)s",
).save()
msg = Message(sender=self.example_user("hamlet"))
content = "#ZUL-123 was fixed and code was deployed to production, also #zul-321 was deployed to staging"
converted = markdown_convert(content, message_realm=realm, message=msg)
self.assertEqual(
converted.rendered_content,
'<p><a href="https://trac.example.com/ticket/ZUL-123">#ZUL-123</a> was fixed and code was deployed to production, also <a href="https://trac.example.com/ticket/zul-321">#zul-321</a> was deployed to staging</p>',
)
def assert_conversion(content: str, should_have_converted: bool = True) -> None:
converted = markdown_convert(content, message_realm=realm, message=msg).rendered_content
converted_topic = topic_links(realm.id, content)
if should_have_converted:
self.assertTrue("https://trac.example.com" in converted)
self.assert_length(converted_topic, 1)
self.assertEqual(
converted_topic[0],
{"url": "https://trac.example.com/ticket/123", "text": "#123"},
)
else:
self.assertTrue("https://trac.example.com" not in converted)
self.assert_length(converted_topic, 0)
assert_conversion("Hello #123 World")
assert_conversion("Hello #123World", False)
assert_conversion("Hello#123 World", False)
assert_conversion("Hello#123World", False)
# Ideally, these should be converted too, but Markdown doesn't yet
# handle word-boundary detection correctly in languages that don't
# use whitespace to separate words.
assert_conversion("チケットは#123です", False)
assert_conversion("チケットは #123です", False)
assert_conversion("チケットは#123 です", False)
assert_conversion("チケットは #123 です")
assert_conversion("(#123)")
assert_conversion("#123>")
assert_conversion('"#123"')
assert_conversion("#123@")
assert_conversion(")#123(", False)
assert_conversion("##123", False)
# Test that nested realm patterns avoid double matching.
RealmFilter(
realm=realm,
pattern=r"hello#(?P<id>[0-9]+)",
url_format_string=r"https://trac.example.com/hello/%(id)s",
).save()
converted_topic = topic_links(realm.id, "hello#123 #234")
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/hello/123", "text": "hello#123"},
{"url": "https://trac.example.com/ticket/234", "text": "#234"},
],
)
# test correct order when realm pattern and normal links are both present.
converted_topic = topic_links(realm.id, "#234 https://google.com")
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/ticket/234", "text": "#234"},
{"url": "https://google.com", "text": "https://google.com"},
],
)
def test_multiple_matching_realm_patterns(self) -> None:
realm = get_realm("zulip")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier_1 = RealmFilter(
realm=realm,
pattern=r"(?P<id>ABC\-[0-9]+)(?![A-Z0-9-])",
url_format_string=url_format_string,
)
linkifier_1.save()
self.assertEqual(
linkifier_1.__str__(),
r"<RealmFilter(zulip): (?P<id>ABC\-[0-9]+)(?![A-Z0-9-])"
" https://trac.example.com/ticket/%(id)s>",
)
url_format_string = r"https://other-trac.example.com/ticket/%(id)s"
linkifier_2 = RealmFilter(
realm=realm,
pattern=r"(?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])",
url_format_string=url_format_string,
)
linkifier_2.save()
self.assertEqual(
linkifier_2.__str__(),
r"<RealmFilter(zulip): (?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])"
" https://other-trac.example.com/ticket/%(id)s>",
)
msg = Message(sender=self.example_user("othello"))
msg.set_topic_name("ABC-123")
flush_per_request_caches()
content = (
"We should fix ABC-123 or [trac ABC-123](https://trac.example.com/ticket/16) today."
)
converted = markdown_convert(content, message_realm=realm, message=msg)
converted_topic = topic_links(realm.id, msg.topic_name())
# The second linkifier (saved later) is ignored when rendering the message:
# after the first conversion, the matched content is marked AtomicString, and
# there is no easy way to apply both linkifiers without risking an infinite loop.
self.assertEqual(
converted.rendered_content,
'<p>We should fix <a href="https://trac.example.com/ticket/ABC-123">ABC-123</a> or <a href="https://trac.example.com/ticket/16">trac ABC-123</a> today.</p>',
)
# Both the links should be generated in topics.
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/ticket/ABC-123", "text": "ABC-123"},
{"url": "https://other-trac.example.com/ticket/ABC-123", "text": "ABC-123"},
],
)
def test_flush_linkifier(self) -> None:
realm = get_realm("zulip")
def flush() -> None:
"""
flush_linkifiers is a post-save hook, so calling it
directly for testing is kind of awkward
"""
class Instance:
realm_id: Optional[int] = None
instance = Instance()
instance.realm_id = realm.id
flush_linkifiers(sender=RealmFilter, instance=cast(RealmFilter, instance))
def save_new_linkifier() -> None:
linkifier = RealmFilter(realm=realm, pattern=r"whatever", url_format_string="whatever")
linkifier.save()
# start fresh for our realm
flush()
self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
# call this just for side effects of populating the cache
linkifiers_for_realm(realm.id)
self.assertTrue(realm_in_local_linkifiers_cache(realm.id))
# Saving a new RealmFilter should have the side effect of
# flushing the cache.
save_new_linkifier()
self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
# and flush it one more time, to make sure we don't get a KeyError
flush()
self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
def test_realm_patterns_negative(self) -> None:
realm = get_realm("zulip")
RealmFilter(
realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=r"https://trac.example.com/ticket/%(id)s",
).save()
boring_msg = Message(sender=self.example_user("othello"))
boring_msg.set_topic_name("no match here")
converted_boring_topic = topic_links(realm.id, boring_msg.topic_name())
self.assertEqual(converted_boring_topic, [])
def test_is_status_message(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "/me makes a list\n* one\n* two"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me makes a list</p>\n<ul>\n<li>one</li>\n<li>two</li>\n</ul>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
content = "/me takes a walk"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me takes a walk</p>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
content = "/me writes a second line\nline"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me writes a second line<br>\nline</p>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
def test_alert_words(self) -> None:
user_profile = self.example_user("othello")
do_add_alert_words(user_profile, ["ALERTWORD", "scaryword"])
msg = Message(sender=user_profile, sending_client=get_client("test"))
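# Build the realm-wide automaton used to scan message content for all users' alert words.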
realm_alert_words_automaton = get_alert_word_automaton(user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = "We have an ALERTWORD day today!"
rendering_result = render(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>We have an ALERTWORD day today!</p>"
)
self.assertEqual(rendering_result.user_ids_with_alert_words, {user_profile.id})
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "We have a NOTHINGWORD day today!"
rendering_result = render(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>We have a NOTHINGWORD day today!</p>"
)
self.assertEqual(rendering_result.user_ids_with_alert_words, set())
def test_alert_words_returns_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["how"],
"cordelia": ["this possible"],
"iago": ["hello"],
"prospero": ["hello"],
"othello": ["how are you"],
"aaron": ["hey"],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = "hello how is this possible how are you doing today"
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {
user_profiles["hamlet"].id,
user_profiles["cordelia"].id,
user_profiles["iago"].id,
user_profiles["prospero"].id,
user_profiles["othello"].id,
}
# All users except aaron have their alert word appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_1(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["provisioning", "Prod deployment"],
"cordelia": ["test", "Prod"],
"iago": ["prod"],
"prospero": ["deployment"],
"othello": ["last"],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """Hello, everyone. Prod deployment has been completed
And this is a new line
to test out how Markdown converts this into a line-ending split array
and this is a new line
last"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {
user_profiles["hamlet"].id,
user_profiles["cordelia"].id,
user_profiles["iago"].id,
user_profiles["prospero"].id,
user_profiles["othello"].id,
}
# All users have their alert word appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_in_french(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["réglementaire", "une politique", "une merveille"],
"cordelia": ["énormément", "Prod"],
"iago": ["prod"],
"prospero": ["deployment"],
"othello": ["last"],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """This is to test out alert words work in languages with accented characters too
bonjour est (énormément) ce a quoi ressemble le français
et j'espère qu'il n'y n' réglementaire a pas de mots d'alerte dans ce texte français
"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {user_profiles["hamlet"].id, user_profiles["cordelia"].id}
# Only hamlet and cordelia have their alert-words appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_empty_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": [],
"cordelia": [],
"iago": [],
"prospero": [],
"othello": [],
"aaron": [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """hello how is this possible how are you doing today
This is to test that no users with alert words are detected as participating
in the sending of the message
"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = set()
# None of the users have their alert-words appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def get_mock_alert_words(self, num_words: int, word_length: int) -> List[str]:
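# Generate num_words identical alert words of word_length characters each,
# for stress-testing rendering against huge alert word lists.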
alert_words = ["x" * word_length] * num_words # type List[str]
return alert_words
def test_alert_words_with_empty_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": [],
"cordelia": [],
"iago": [],
"othello": [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """This is to test a empty alert words i.e. no user has any alert-words set"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = set()
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["issue124"],
"cordelia": self.get_mock_alert_words(500, 10),
"iago": self.get_mock_alert_words(500, 10),
"othello": self.get_mock_alert_words(500, 10),
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """The code above will print 10 random values of numbers between 1 and 100.
The second line, for x in range(10), determines how many values will be printed (when you use
range(x), the number that you use in place of x will be the amount of values that you'll have
printed. If you want 20 values, use range(20). Use range(5) if you only want 5 values returned,
etc.). I was talking about the issue124 on github. Then the third line: print random.randint(1,101) will automatically select a random integer
between 1 and 100 for you. The process is fairly simple
"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {user_profiles["hamlet"].id}
# Only hamlet has alert-word 'issue124' present in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_default_code_block_language(self) -> None:
realm = get_realm("zulip")
self.assertEqual(realm.default_code_block_language, None)
text = "```{}\nconsole.log('Hello World');\n```\n"
# Render without default language
msg_with_js = markdown_convert_wrapper(text.format("js"))
msg_with_python = markdown_convert_wrapper(text.format("python"))
msg_without_language = markdown_convert_wrapper(text.format(""))
msg_with_quote = markdown_convert_wrapper(text.format("quote"))
msg_with_math = markdown_convert_wrapper(text.format("math"))
msg_with_none = markdown_convert_wrapper(text.format("none"))
# Render with default=javascript
do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
msg_without_language_default_js = markdown_convert_wrapper(text.format(""))
msg_with_python_default_js = markdown_convert_wrapper(text.format("python"))
# Render with default=python
do_set_realm_property(realm, "default_code_block_language", "python", acting_user=None)
msg_without_language_default_py = markdown_convert_wrapper(text.format(""))
msg_with_none_default_py = markdown_convert_wrapper(text.format("none"))
# Render with default=quote
do_set_realm_property(realm, "default_code_block_language", "quote", acting_user=None)
msg_without_language_default_quote = markdown_convert_wrapper(text.format(""))
# Render with default=math
do_set_realm_property(realm, "default_code_block_language", "math", acting_user=None)
msg_without_language_default_math = markdown_convert_wrapper(text.format(""))
# Render without default language
do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
msg_without_language_final = markdown_convert_wrapper(text.format(""))
self.assertTrue(msg_with_js == msg_without_language_default_js)
self.assertTrue(
msg_with_python == msg_with_python_default_js == msg_without_language_default_py
)
self.assertTrue(msg_with_quote == msg_without_language_default_quote)
self.assertTrue(msg_with_math == msg_without_language_default_math)
self.assertTrue(msg_without_language == msg_without_language_final)
self.assertTrue(msg_with_none == msg_with_none_default_py)
# Test checking inside nested quotes
nested_text = "````quote\n\n{}\n\n{}````".format(text.format("js"), text.format(""))
do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
rendered = markdown_convert_wrapper(nested_text)
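# Capture the remainder of each line starting after a <pre> tag, so the
# highlighting markup of the two code blocks can be compared.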
with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
self.assertTrue(with_language == without_language)
do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
rendered = markdown_convert_wrapper(nested_text)
with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
self.assertFalse(with_language == without_language)
def test_mention_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**all** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@all" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_everyone(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**everyone** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@everyone" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_stream(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**stream** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@stream" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_at_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@all test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@all test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_at_everyone(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@everyone test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@everyone test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_word_starting_with_at_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "test @alleycat.com test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>test @alleycat.com test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_at_normal_user(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@aaron test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@aaron test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_single(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
content = f"@**|{user_id}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
def test_mention_silent(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@_**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention silent" '
f'data-user-id="{user_id}">'
"King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_silent_wildcard_mention(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
wildcards = ["all", "everyone", "stream"]
for wildcard in wildcards:
content = f"@_**{wildcard}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
f'<p><span class="user-mention silent" data-user-id="*">{wildcard}</span></p>',
)
self.assertFalse(rendering_result.mentions_wildcard)
def test_mention_invalid_followed_by_valid(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@**Invalid user** and @**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p>@<strong>Invalid user</strong> and <span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
def test_invalid_mention_not_uses_valid_mention_data(self) -> None:
sender_user_profile = self.example_user("othello")
hamlet = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Even though King Hamlet is present in the mention data (it was
# fetched for the first mention), the second mention is incorrect
# (it reuses hamlet's id), so it should not be able to use that
# data to create a valid mention.
content = f"@**King Hamlet|{hamlet.id}** and @**aaron|{hamlet.id}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
f'<p><span class="user-mention" data-user-id="{hamlet.id}">'
f"@King Hamlet</span> and @<strong>aaron|{hamlet.id}</strong></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id})
def test_silent_mention_invalid_followed_by_valid(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@_**Invalid user** and @_**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p>@_<strong>Invalid user</strong> and <span class="user-mention silent" '
f'data-user-id="{user_id}">'
"King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
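# Mention by user ID: the first ID is invalid, while the second
# resolves to King Hamlet but remains silent.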
content = f"@_**|123456789** and @_**|{user_id}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>@_<strong>|123456789</strong> and "
'<span class="user-mention silent" '
f'data-user-id="{user_id}">'
"King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_possible_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str], has_wildcards: bool = False) -> None:
self.assertEqual(possible_mentions(content), (names, has_wildcards))
aaron = self.example_user("aaron")
assert_mentions("", set())
assert_mentions("boring", set())
assert_mentions("@**all**", set(), True)
assert_mentions("smush@**steve**smush", set())
assert_mentions(
f"Hello @**King Hamlet**, @**|{aaron.id}** and @**Cordelia, Lear's daughter**\n@**Foo van Barson|1234** @**all**",
{"King Hamlet", f"|{aaron.id}", "Cordelia, Lear's daughter", "Foo van Barson|1234"},
True,
)
def test_mention_multiple(self) -> None:
sender_user_profile = self.example_user("othello")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "@**King Hamlet** and @**Cordelia, Lear's daughter**, check this out"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>"
'<span class="user-mention" '
f'data-user-id="{hamlet.id}">@King Hamlet</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
"check this out</p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})
def test_mention_in_quotes(self) -> None:
othello = self.example_user("othello")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
msg = Message(sender=othello, sending_client=get_client("test"))
content = "> @**King Hamlet** and @**Othello, the Moor of Venice**\n\n @**King Hamlet** and @**Cordelia, Lear's daughter**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<blockquote>\n<p>"
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
" and "
f'<span class="user-mention silent" data-user-id="{othello.id}">Othello, the Moor of Venice</span>'
"</p>\n</blockquote>\n"
"<p>"
f'<span class="user-mention" data-user-id="{hamlet.id}">@King Hamlet</span>'
" and "
f'<span class="user-mention" data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>'
"</p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})
# Fenced quotes and "> " quotes should render identically for both silent and regular syntax.
expected = (
"<blockquote>\n<p>"
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
"</p>\n</blockquote>"
)
content = "```quote\n@**King Hamlet**\n```"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
content = "> @**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
content = "```quote\n@_**King Hamlet**\n```"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
content = "> @_**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_wildcard_mention_in_quotes(self) -> None:
user_profile = self.example_user("othello")
message = Message(sender=user_profile, sending_client=get_client("test"))
def assert_silent_mention(content: str, wildcard: str) -> None:
expected = (
"<blockquote>\n<p>"
f'<span class="user-mention silent" data-user-id="*">{wildcard}</span>'
"</p>\n</blockquote>"
)
rendering_result = render_markdown(message, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertFalse(rendering_result.mentions_wildcard)
wildcards = ["all", "everyone", "stream"]
for wildcard in wildcards:
assert_silent_mention(f"> @**{wildcard}**", wildcard)
assert_silent_mention(f"> @_**{wildcard}**", wildcard)
assert_silent_mention(f"```quote\n@**{wildcard}**\n```", wildcard)
assert_silent_mention(f"```quote\n@_**{wildcard}**\n```", wildcard)
def test_mention_duplicate_full_name(self) -> None:
realm = get_realm("zulip")
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password="whatever",
realm=realm,
full_name=full_name,
)
sender_user_profile = self.example_user("othello")
twin1 = make_user("twin1@example.com", "Mark Twin")
twin2 = make_user("twin2@example.com", "Mark Twin")
cordelia = self.example_user("cordelia")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = f"@**Mark Twin|{twin1.id}**, @**Mark Twin|{twin2.id}** and @**Cordelia, Lear's daughter**, hi."
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>"
'<span class="user-mention" '
f'data-user-id="{twin1.id}">@Mark Twin</span>, '
'<span class="user-mention" '
f'data-user-id="{twin2.id}">@Mark Twin</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
"hi.</p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {twin1.id, twin2.id, cordelia.id})
def test_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @**Nonexistent User**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<strong>Nonexistent User</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_user_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user that potentially interferes with the pattern.
test_user = create_user(
email="atomic@example.com",
password="whatever",
realm=realm,
full_name="Atomic #123",
)
content = "@**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{test_user.id}">'
"@Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {test_user.id})
content = "@_**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention silent" '
f'data-user-id="{test_user.id}">'
"Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def create_user_group_for_test(self, user_group_name: str) -> UserGroup:
othello = self.example_user("othello")
return create_user_group(user_group_name, [othello], get_realm("zulip"))
def test_user_group_mention_single(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
user_group = self.create_user_group_for_test("support")
content = "@**King Hamlet** @*support*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_invalid_user_group_followed_by_valid_mention_single(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
user_group = self.create_user_group_for_test("support")
content = "@**King Hamlet** @*Invalid user group* @*support*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
"@<em>Invalid user group</em> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_user_group_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user("hamlet")
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user-group that potentially interferes with the pattern.
user_id = user_profile.id
user_group = self.create_user_group_for_test("support #123")
content = "@**King Hamlet** @*support #123*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_possible_user_group_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str]) -> None:
self.assertEqual(possible_user_group_mentions(content), names)
assert_mentions("", set())
assert_mentions("boring", set())
assert_mentions("@**all**", set())
assert_mentions("smush@*steve*smush", set())
assert_mentions(
"@*support* Hello @**King Hamlet** and @**Cordelia, Lear's daughter**\n"
"@**Foo van Barson** @**all**",
{"support"},
)
assert_mentions(
"Attention @*support*, @*frontend* and @*backend*\ngroups.",
{"support", "frontend", "backend"},
)
def test_user_group_mention_multiple(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
support = self.create_user_group_for_test("support")
backend = self.create_user_group_for_test("backend")
content = "@*support* and @*backend*, check this out"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>"
'<span class="user-group-mention" '
f'data-user-group-id="{support.id}">'
"@support</span> "
"and "
'<span class="user-group-mention" '
f'data-user-group-id="{backend.id}">'
"@backend</span>, "
"check this out"
"</p>",
)
self.assertEqual(rendering_result.mentions_user_group_ids, {support.id, backend.id})
def test_user_group_mention_edit(self) -> None:
sender_user_profile = self.example_user("hamlet")
user_profile = self.example_user("othello")
self.create_user_group_for_test("support")
self.login("hamlet")
msg_id = self.send_stream_message(
sender_user_profile, "Denmark", topic_name="editing", content="test"
)
def update_message_and_check_flag(content: str, mentioned: bool) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
"message_id": msg_id,
"content": content,
},
)
self.assert_json_success(result)
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=msg_id,
)
if mentioned:
self.assertIn("mentioned", um.flags_list())
else:
self.assertNotIn("mentioned", um.flags_list())
update_message_and_check_flag("@*support*", True)
update_message_and_check_flag("@*support-invalid* edited", False)
update_message_and_check_flag("@*support* edited", True)
update_message_and_check_flag("edited", False)
update_message_and_check_flag("@*support*", True)
update_message_and_check_flag("@_*support*", False)
def test_user_group_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @*Nonexistent group*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<em>Nonexistent group</em></p>"
)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
def test_user_group_silent_mention(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
support = self.create_user_group_for_test("support")
content = "We'll add you to @_*support* user group."
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>We'll add you to "
f'<span class="user-group-mention silent" data-user-group-id="{support.id}">support</span>'
" user group.</p>",
)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
def test_user_group_mention_in_quotes(self) -> None:
user_profile = self.example_user("othello")
message = Message(sender=user_profile, sending_client=get_client("test"))
backend = self.create_user_group_for_test("backend")
def assert_silent_mention(content: str) -> None:
expected = (
"<blockquote>\n<p>"
f'<span class="user-group-mention silent" data-user-group-id="{backend.id}">backend</span>'
"</p>\n</blockquote>"
)
rendering_result = render_markdown(message, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
assert_silent_mention("> @*backend*")
assert_silent_mention("> @_*backend*")
assert_silent_mention("```quote\n@*backend*\n```")
assert_silent_mention("```quote\n@_*backend*\n```")
def test_stream_single(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
d=denmark,
),
)
def test_invalid_stream_followed_by_valid_mention(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Invalid** and #**Denmark**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p>#<strong>Invalid</strong> and <a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
d=denmark,
),
)
def test_stream_multiple(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = get_realm("zulip")
denmark = get_stream("Denmark", realm)
scotland = get_stream("Scotland", realm)
content = "Look to #**Denmark** and #**Scotland**, there something"
self.assertEqual(
render_markdown(msg, content).rendered_content,
"<p>Look to "
'<a class="stream" '
'data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-Denmark">#{denmark.name}</a> and '
'<a class="stream" '
'data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-Scotland">#{scotland.name}</a>, '
"there something</p>".format(denmark=denmark, scotland=scotland),
)
def test_stream_case_sensitivity(self) -> None:
realm = get_realm("zulip")
case_sens = Stream.objects.create(name="CaseSens", realm=realm)
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**CaseSens**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="/#narrow/stream/{s.id}-{s.name}">#{s.name}</a></p>'.format(
s=case_sens,
),
)
def test_stream_case_sensitivity_nonmatching(self) -> None:
"""#StreamName requires the stream be spelled with the correct case
currently. If we change that in the future, we'll need to change this
test."""
realm = get_realm("zulip")
Stream.objects.create(name="CaseSens", realm=realm)
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**casesens**"
self.assertEqual(
render_markdown(msg, content).rendered_content, "<p>#<strong>casesens</strong></p>"
)
def test_topic_single(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>some topic**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/some.20topic">#{d.name} > some topic</a></p>'.format(
d=denmark,
),
)
def test_topic_atomic_string(self) -> None:
realm = get_realm("zulip")
# Create a linkifier.
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a topic link that potentially interferes with the pattern.
denmark = get_stream("Denmark", realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>#1234**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/.231234">#{d.name} > #1234</a></p>'.format(
d=denmark,
),
)
def test_topic_multiple(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
scotland = get_stream("Scotland", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "This has two links: #**Denmark>some topic** and #**Scotland>other topic**."
self.assertEqual(
render_markdown(msg, content).rendered_content,
"<p>This has two links: "
'<a class="stream-topic" data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-{denmark.name}/topic/some.20topic">'
"#{denmark.name} > some topic</a>"
" and "
'<a class="stream-topic" data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-{scotland.name}/topic/other.20topic">'
"#{scotland.name} > other topic</a>"
".</p>".format(denmark=denmark, scotland=scotland),
)
def test_possible_stream_names(self) -> None:
content = """#**test here**
This mentions #**Denmark** too.
#**garçon** #**천국** @**Ignore Person**
"""
self.assertEqual(
possible_linked_stream_names(content),
{"test here", "Denmark", "garçon", "천국"},
)
def test_stream_unicode(self) -> None:
realm = get_realm("zulip")
uni = Stream.objects.create(name="привет", realm=realm)
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**привет**"
quoted_name = ".D0.BF.D1.80.D0.B8.D0.B2.D0.B5.D1.82"
href = f"/#narrow/stream/{uni.id}-{quoted_name}"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=uni,
href=href,
),
)
def test_stream_atomic_string(self) -> None:
realm = get_realm("zulip")
# Create a linkifier.
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a stream that potentially interferes with the pattern.
stream = Stream.objects.create(name="Stream #1234", realm=realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Stream #1234**"
href = f"/#narrow/stream/{stream.id}-Stream-.231234"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=stream,
href=href,
),
)
def test_stream_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "There #**Nonexistentstream**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>There #<strong>Nonexistentstream</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_image_preview_title(self) -> None:
msg = "[My favorite image](https://example.com/testimage.png)"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>"
'<a href="https://example.com/testimage.png">My favorite image</a>'
"</p>\n"
'<div class="message_inline_image">'
'<a href="https://example.com/testimage.png" title="My favorite image">'
'<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=full" src="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=thumbnail">'
"</a>"
"</div>",
)
def test_mit_rendering(self) -> None:
"""Test the Markdown configs for the MIT Zephyr mirroring system;
verifies almost all inline patterns are disabled, but
inline_interesting_links is still enabled"""
msg = "**test**"
realm = get_realm("zephyr")
client = get_client("zephyr_mirror")
message = Message(sending_client=client, sender=self.mit_user("sipbtest"))
converted = markdown_convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted.rendered_content,
"<p>**test**</p>",
)
msg = "* test"
converted = markdown_convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted.rendered_content,
"<p>* test</p>",
)
msg = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
converted = markdown_convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted.rendered_content,
'<p><a href="https://lists.debian.org/debian-ctte/2014/02/msg00173.html">https://lists.debian.org/debian-ctte/2014/02/msg00173.html</a></p>',
)
def test_url_to_a(self) -> None:
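# A javascript: scheme URL must be rejected and returned as plain text rather than a link.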
url = "javascript://example.com/invalidURL"
converted = url_to_a(db_data=None, url=url, text=url)
self.assertEqual(
converted,
"javascript://example.com/invalidURL",
)
def test_disabled_code_block_processor(self) -> None:
msg = (
"Hello,\n\n"
+ " I am writing this message to test something. I am writing this message to test something."
)
converted = markdown_convert_wrapper(msg)
expected_output = (
"<p>Hello,</p>\n"
+ '<div class="codehilite"><pre><span></span><code>I am writing this message to test something. I am writing this message to test something.\n'
+ "</code></pre></div>"
)
self.assertEqual(converted, expected_output)
realm = do_create_realm(
string_id="code_block_processor_test", name="code_block_processor_test"
)
maybe_update_markdown_engines(realm.id, True)
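# With the email gateway engine, the code block processor is disabled,
# so the indented block renders as a plain paragraph instead.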
rendering_result = markdown_convert(msg, message_realm=realm, email_gateway=True)
expected_output = (
"<p>Hello,</p>\n"
+ "<p>I am writing this message to test something. I am writing this message to test something.</p>"
)
self.assertEqual(rendering_result.rendered_content, expected_output)
def test_normal_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://example.com/#settings/"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="http://example.com/#settings/">http://example.com/#settings/</a></p>',
)
def test_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#narrow/stream/999-hello"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="#narrow/stream/999-hello">http://zulip.testserver/#narrow/stream/999-hello</a></p>',
)
def test_relative_link_streams_page(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#streams/all"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="#streams/all">http://zulip.testserver/#streams/all</a></p>',
)
def test_md_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "[hello](http://zulip.testserver/#narrow/stream/999-hello)"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="#narrow/stream/999-hello">hello</a></p>',
)
def test_html_entity_conversion(self) -> None:
msg = """\
Test raw: Hello, ©
Test inline code: `©`
Test fenced code:
```
©
©
```
Test quote:
~~~quote
©
~~~
Test a list:
* ©
* `©`
* ```©```
Test an indented block:
©"""
expected_output = """\
<p>Test raw: Hello, ©<br>
Test inline code: <code>&copy;</code></p>
<p>Test fenced code:</p>
<div class="codehilite"><pre><span></span><code>&copy;
&copy;
</code></pre></div>
<p>Test quote:</p>
<blockquote>
<p>©</p>
</blockquote>
<p>Test a list:</p>
<ul>
<li>©</li>
<li><code>&copy;</code></li>
<li><code>&copy;</code></li>
</ul>
<p>Test an indented block:</p>
<div class="codehilite"><pre><span></span><code>&copy;
</code></pre></div>"""
converted = markdown_convert_wrapper(dedent(msg))
self.assertEqual(converted, dedent(expected_output))
class MarkdownApiTests(ZulipTestCase):
def test_render_message_api(self) -> None:
content = "That is a **bold** statement"
result = self.api_post(
self.example_user("othello"),
"/api/v1/messages/render",
dict(content=content),
)
self.assert_json_success(result)
self.assertEqual(
result.json()["rendered"], "<p>That is a <strong>bold</strong> statement</p>"
)
def test_render_mention_stream_api(self) -> None:
"""Determines whether we're correctly passing the realm context"""
content = "This mentions #**Denmark** and @**King Hamlet**."
result = self.api_post(
self.example_user("othello"),
"/api/v1/messages/render",
dict(content=content),
)
self.assert_json_success(result)
user_id = self.example_user("hamlet").id
stream_id = get_stream("Denmark", get_realm("zulip")).id
self.assertEqual(
result.json()["rendered"],
f'<p>This mentions <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-Denmark">#Denmark</a> and <span class="user-mention" data-user-id="{user_id}">@King Hamlet</span>.</p>',
)
class MarkdownErrorTests(ZulipTestCase):
def test_markdown_error_handling(self) -> None:
with self.simulated_markdown_failure():
with self.assertRaises(MarkdownRenderingException):
markdown_convert_wrapper("")
def test_send_message_errors(self) -> None:
message = "whatever"
with self.simulated_markdown_failure():
# We don't use assertRaisesRegex because it seems to not
# handle i18n properly here on some systems.
with self.assertRaises(JsonableError):
self.send_stream_message(self.example_user("othello"), "Denmark", message)
@override_settings(MAX_MESSAGE_LENGTH=10)
def test_ultra_long_rendering(self) -> None:
"""A rendered message with an ultra-long length (> 100 * MAX_MESSAGE_LENGTH)
throws an exception"""
msg = "mock rendered message\n" * 10 * settings.MAX_MESSAGE_LENGTH
with mock.patch("zerver.lib.markdown.timeout", return_value=msg), mock.patch(
"zerver.lib.markdown.markdown_logger"
):
with self.assertRaises(MarkdownRenderingException):
markdown_convert_wrapper(msg)
def test_curl_code_block_validation(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
processor.run_content_validators = True
markdown_input = [
"``` curl",
"curl {{ api_url }}/v1/register",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
' -d "queue_id=1375801870:2942"',
"```",
]
with self.assertRaises(MarkdownRenderingException):
processor.run(markdown_input)
def test_curl_code_block_without_validation(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
markdown_input = [
"``` curl",
"curl {{ api_url }}/v1/register",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
' -d "queue_id=1375801870:2942"',
"```",
]
expected = [
"",
"**curl:curl {{ api_url }}/v1/register",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
' -d "queue_id=1375801870:2942"**',
"",
"",
]
result = processor.run(markdown_input)
self.assertEqual(result, expected)
import copy
import os
import re
from textwrap import dedent
from typing import Any, Dict, List, Optional, Set, Tuple, cast
from unittest import mock
import orjson
from django.conf import settings
from django.test import override_settings
from markdown import Markdown
from zerver.lib.actions import (
change_user_is_active,
do_add_alert_words,
do_change_user_setting,
do_create_realm,
do_remove_realm_emoji,
do_set_realm_property,
)
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.camo import get_camo_url
from zerver.lib.create_user import create_user
from zerver.lib.emoji import get_emoji_url
from zerver.lib.exceptions import JsonableError, MarkdownRenderingException
from zerver.lib.markdown import (
MarkdownListPreprocessor,
MessageRenderingResult,
clear_state_for_testing,
content_has_emoji_syntax,
fetch_tweet_data,
get_tweet_id,
image_preview_enabled,
markdown_convert,
maybe_update_markdown_engines,
possible_linked_stream_names,
topic_links,
url_embed_preview_enabled,
url_to_a,
)
from zerver.lib.markdown.fenced_code import FencedBlockPreprocessor
from zerver.lib.mdiff import diff_strings
from zerver.lib.mention import (
MentionData,
get_possible_mentions_info,
possible_mentions,
possible_user_group_mentions,
)
from zerver.lib.message import render_markdown
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.tex import render_tex
from zerver.lib.user_groups import create_user_group
from zerver.models import (
Message,
RealmEmoji,
RealmFilter,
Stream,
UserGroup,
UserMessage,
UserProfile,
flush_linkifiers,
flush_per_request_caches,
get_client,
get_realm,
get_stream,
linkifiers_for_realm,
realm_in_local_linkifiers_cache,
)
class SimulatedFencedBlockPreprocessor(FencedBlockPreprocessor):
# Simulate code formatting.
def format_code(self, lang: Optional[str], code: str) -> str:
return (lang or "") + ":" + code
def placeholder(self, s: str) -> str:
return "**" + s.strip("\n") + "**"
class FencedBlockPreprocessorTest(ZulipTestCase):
def test_simple_quoting(self) -> None:
processor = FencedBlockPreprocessor(Markdown())
markdown_input = [
"~~~ quote",
"hi",
"bye",
"",
"",
]
expected = [
"",
"> hi",
"> bye",
"> ",
"> ",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
def test_serial_quoting(self) -> None:
processor = FencedBlockPreprocessor(Markdown())
markdown_input = [
"~~~ quote",
"hi",
"~~~",
"",
"~~~ quote",
"bye",
"",
"",
]
expected = [
"",
"> hi",
"",
"",
"",
"> bye",
"> ",
"> ",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
def test_serial_code(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
markdown_input = [
"``` .py",
"hello()",
"```",
"",
"```vb.net",
"goodbye()",
"```",
"",
"```c#",
"weirdchar()",
"```",
"",
"```",
"no-highlight()",
"```",
"",
]
expected = [
"",
"**py:hello()**",
"",
"",
"",
"**vb.net:goodbye()**",
"",
"",
"",
"**c#:weirdchar()**",
"",
"",
"",
"**:no-highlight()**",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
def test_nested_code(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
markdown_input = [
"~~~ quote",
"hi",
"``` .py",
"hello()",
"```",
"",
"",
]
expected = [
"",
"> hi",
"> ",
"> **py:hello()**",
"> ",
"> ",
"> ",
"",
"",
]
lines = processor.run(markdown_input)
self.assertEqual(lines, expected)
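# Convenience wrapper: render content in the default "zulip" realm and return the HTML.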
def markdown_convert_wrapper(content: str) -> str:
return markdown_convert(
content=content,
message_realm=get_realm("zulip"),
).rendered_content
class MarkdownMiscTest(ZulipTestCase):
def test_diffs_work_as_expected(self) -> None:
str1 = "<p>The quick brown fox jumps over the lazy dog. Animal stories are fun, yeah</p>"
str2 = "<p>The fast fox jumps over the lazy dogs and cats. Animal stories are fun</p>"
expected_diff = "\u001b[34m-\u001b[0m <p>The \u001b[33mquick brown\u001b[0m fox jumps over the lazy dog. Animal stories are fun\u001b[31m, yeah\u001b[0m</p>\n\u001b[34m+\u001b[0m <p>The \u001b[33mfast\u001b[0m fox jumps over the lazy dog\u001b[32ms and cats\u001b[0m. Animal stories are fun</p>\n"
self.assertEqual(diff_strings(str1, str2), expected_diff)
def test_get_possible_mentions_info(self) -> None:
realm = get_realm("zulip")
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password="whatever",
realm=realm,
full_name=full_name,
)
fred1 = make_user("fred1@example.com", "Fred Flintstone")
change_user_is_active(fred1, False)
fred2 = make_user("fred2@example.com", "Fred Flintstone")
fred3 = make_user("fred3@example.com", "Fred Flintstone")
change_user_is_active(fred3, False)
fred4 = make_user("fred4@example.com", "Fred Flintstone")
lst = get_possible_mentions_info(
realm.id, {"Fred Flintstone", "Cordelia, LEAR's daughter", "Not A User"}
)
set_of_names = {row["full_name"].lower() for row in lst}
self.assertEqual(set_of_names, {"fred flintstone", "cordelia, lear's daughter"})
by_id = {row["id"]: row for row in lst}
self.assertEqual(
by_id.get(fred2.id),
dict(
email=fred2.email,
full_name="Fred Flintstone",
id=fred2.id,
),
)
self.assertEqual(
by_id.get(fred4.id),
dict(
email=fred4.email,
full_name="Fred Flintstone",
id=fred4.id,
),
)
def test_mention_data(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
content = "@**King Hamlet** @**Cordelia, lear's daughter**"
mention_data = MentionData(realm.id, content)
self.assertEqual(mention_data.get_user_ids(), {hamlet.id, cordelia.id})
self.assertEqual(
mention_data.get_user_by_id(hamlet.id),
dict(
email=hamlet.email,
full_name=hamlet.full_name,
id=hamlet.id,
),
)
user = mention_data.get_user_by_name("king hamLET")
assert user is not None
self.assertEqual(user["email"], hamlet.email)
self.assertFalse(mention_data.message_has_wildcards())
content = "@**King Hamlet** @**Cordelia, lear's daughter** @**all**"
mention_data = MentionData(realm.id, content)
self.assertTrue(mention_data.message_has_wildcards())
def test_invalid_katex_path(self) -> None:
with self.settings(DEPLOY_ROOT="/nonexistent"):
with self.assertLogs(level="ERROR") as m:
render_tex("random text")
self.assertEqual(m.output, ["ERROR:root:Cannot find KaTeX for latex rendering!"])
class MarkdownListPreprocessorTest(ZulipTestCase):
# We test that the preprocessor inserts blank lines at correct places.
# We use <> to indicate that we need to insert a blank line here.
def split_message(self, msg: str) -> Tuple[List[str], List[str]]:
original = msg.replace("<>", "").split("\n")
expected = re.split(r"\n|<>", msg)
return original, expected
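    # For example, split_message("a\n<>* b") returns
    # (["a", "* b"], ["a", "", "* b"]): the original drops the marker,
    # while the expected output replaces it with a blank line.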
def test_basic_list(self) -> None:
preprocessor = MarkdownListPreprocessor()
original, expected = self.split_message("List without a gap\n<>* One\n* Two")
self.assertEqual(preprocessor.run(original), expected)
def test_list_after_quotes(self) -> None:
preprocessor = MarkdownListPreprocessor()
original, expected = self.split_message(
"```quote\nSomething\n```\n\nList without a gap\n<>* One\n* Two"
)
self.assertEqual(preprocessor.run(original), expected)
def test_list_in_code(self) -> None:
preprocessor = MarkdownListPreprocessor()
original, expected = self.split_message("```\nList without a gap\n* One\n* Two\n```")
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_different_fences(self) -> None:
preprocessor = MarkdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
~~~
This is a nested code fence, do not make changes here:
* one
* two
````quote
Quote in code fence. Should not convert:
* one
* two
````
~~~
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_same_fence(self) -> None:
preprocessor = MarkdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
```python
This is a nested code fence, do not make changes here:
* one
* two
```quote
Quote in code fence. Should not convert:
* one
* two
```
```
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
class MarkdownTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
clear_state_for_testing()
def assertEqual(self, first: Any, second: Any, msg: str = "") -> None:
if isinstance(first, str) and isinstance(second, str):
if first != second:
raise AssertionError(
"Actual and expected outputs do not match; showing diff.\n"
+ diff_strings(first, second)
+ msg
)
else:
super().assertEqual(first, second)
def load_markdown_tests(self) -> Tuple[Dict[str, Any], List[List[str]]]:
test_fixtures = {}
with open(
os.path.join(os.path.dirname(__file__), "fixtures/markdown_test_cases.json"), "rb"
) as f:
data = orjson.loads(f.read())
for test in data["regular_tests"]:
test_fixtures[test["name"]] = test
return test_fixtures, data["linkify_tests"]
def test_markdown_no_ignores(self) -> None:
# We do not want any ignored tests to be committed and merged.
format_tests, linkify_tests = self.load_markdown_tests()
for name, test in format_tests.items():
message = f'Test "{name}" shouldn\'t be ignored.'
is_ignored = test.get("ignore", False)
self.assertFalse(is_ignored, message)
def test_markdown_fixtures(self) -> None:
format_tests, linkify_tests = self.load_markdown_tests()
valid_keys = {
"name",
"input",
"expected_output",
"backend_only_rendering",
"marked_expected_output",
"text_content",
"translate_emoticons",
"ignore",
}
for name, test in format_tests.items():
with self.subTest(markdown_test_case=name):
# Check that there aren't any unexpected keys as those are often typos
self.assert_length(set(test.keys()) - valid_keys, 0)
# Ignore tests if specified
if test.get("ignore", False):
continue # nocoverage
if test.get("translate_emoticons", False):
                    # Create a user profile with emoticon translation
                    # enabled and render the message as that user.
user_profile = self.example_user("othello")
do_change_user_setting(user_profile, "translate_emoticons", True)
msg = Message(sender=user_profile, sending_client=get_client("test"))
rendering_result = render_markdown(msg, test["input"])
converted = rendering_result.rendered_content
else:
converted = markdown_convert_wrapper(test["input"])
self.assertEqual(converted, test["expected_output"])
def replaced(payload: str, url: str, phrase: str = "") -> str:
if url[:4] == "http":
href = url
elif "@" in url:
href = "mailto:" + url
else:
href = "http://" + url
return payload % (f'<a href="{href}">{url}</a>',)
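        # For example, replaced("<p>%s</p>", "example.com") yields
        # '<p><a href="http://example.com">example.com</a></p>', since a
        # bare domain gets an http:// scheme prepended.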
with mock.patch(
"zerver.lib.url_preview.preview.link_embed_data_from_cache", return_value=None
):
for inline_url, reference, url in linkify_tests:
try:
match = replaced(reference, url, phrase=inline_url)
except TypeError:
match = reference
converted = markdown_convert_wrapper(inline_url)
self.assertEqual(match, converted)
def test_inline_file(self) -> None:
msg = "Check out this file file:///Volumes/myserver/Users/Shared/pi.py"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Check out this file <a href="file:///Volumes/myserver/Users/Shared/pi.py">file:///Volumes/myserver/Users/Shared/pi.py</a></p>',
)
clear_state_for_testing()
with self.settings(ENABLE_FILE_LINKS=False):
realm = do_create_realm(string_id="file_links_test", name="file_links_test")
maybe_update_markdown_engines(realm.id, False)
self.assertEqual(
markdown_convert(msg, message_realm=realm).rendered_content,
"<p>Check out this file file:///Volumes/myserver/Users/Shared/pi.py</p>",
)
def test_inline_bitcoin(self) -> None:
msg = "To bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa or not to bitcoin"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>To <a href="bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa">bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa</a> or not to bitcoin</p>',
)
def test_inline_youtube(self) -> None:
msg = "Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
)
msg = "http://www.youtube.com/watch?v=hx1mjT73xYE"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
)
msg = "https://youtu.be/hx1mjT73xYE"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://youtu.be/hx1mjT73xYE">https://youtu.be/hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="https://youtu.be/hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
)
msg = "https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
not_converted = markdown_convert_wrapper(msg)
self.assertEqual(
not_converted,
'<p><a href="https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>',
)
msg = (
"https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>\n<div class="youtube-video message_inline_image"><a data-id="O5nskjZ_GoI" href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"><img src="{get_camo_url("https://i.ytimg.com/vi/O5nskjZ_GoI/default.jpg")}"></a></div>""",
)
msg = "http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw">http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw</a></p>\n<div class="youtube-video message_inline_image"><a data-id="nOJgD4fcZhI" href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"><img src="{get_camo_url("https://i.ytimg.com/vi/nOJgD4fcZhI/default.jpg")}"></a></div>""",
)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_inline_vimeo(self) -> None:
msg = "Check out the debate: https://vimeo.com/246979354"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Check out the debate: <a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
)
msg = "https://vimeo.com/246979354"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_thumbnail_url(self) -> None:
realm = get_realm("zephyr")
msg = "[foobar](/user_uploads/{realm_id}/50/w2G6ok9kr8AMCQCTNAUOFMln/IMG_0677.JPG)"
msg = msg.format(realm_id=realm.id)
        thumbnail_img = '<img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&amp;size=thumbnail"><'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "https://www.google.com/images/srpr/logo4w.png"
        thumbnail_img = '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=thumbnail">'
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "www.google.com/images/srpr/logo4w.png"
        thumbnail_img = '<img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=thumbnail">'
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "https://www.google.com/images/srpr/logo4w.png"
thumbnail_img = f"""<div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img src="{get_camo_url("https://www.google.com/images/srpr/logo4w.png")}"></a></div>"""
with self.settings(THUMBNAIL_IMAGES=False):
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
# Any URL which is not an external link and doesn't start with
# /user_uploads/ is not thumbnailed
msg = "[foobar](/static/images/cute/turtle.png)"
thumbnail_img = '<div class="message_inline_image"><a href="/static/images/cute/turtle.png" title="foobar"><img src="/static/images/cute/turtle.png"></a></div>'
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
msg = "[foobar](/user_avatars/{realm_id}/emoji/images/50.png)"
msg = msg.format(realm_id=realm.id)
thumbnail_img = '<div class="message_inline_image"><a href="/user_avatars/{realm_id}/emoji/images/50.png" title="foobar"><img src="/user_avatars/{realm_id}/emoji/images/50.png"></a></div>'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = markdown_convert_wrapper(msg)
self.assertIn(thumbnail_img, converted)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview(self) -> None:
        with_preview = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&amp;size=thumbnail"></a></div>'
without_preview = '<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>'
content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, with_preview)
realm = msg.get_realm()
        realm.inline_image_preview = False
realm.save()
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, without_preview)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_external_image_preview_use_camo(self) -> None:
content = "https://example.com/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{get_camo_url(content)}"></a></div>"""
converted = markdown_convert_wrapper(content)
        self.assertIn(thumbnail_img, converted)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_static_image_preview_skip_camo(self) -> None:
content = f"{ settings.STATIC_URL }/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{content}"></a></div>"""
converted = markdown_convert_wrapper(content)
        self.assertIn(thumbnail_img, converted)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_realm_image_preview_skip_camo(self) -> None:
content = f"https://zulip.{ settings.EXTERNAL_HOST }/thing.jpeg"
converted = markdown_convert_wrapper(content)
        self.assertNotIn(get_camo_url(content), converted)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_cross_realm_image_preview_use_camo(self) -> None:
content = f"https://otherrealm.{ settings.EXTERNAL_HOST }/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{ content }"><img src="{ get_camo_url(content) }"></a></div>"""
converted = markdown_convert_wrapper(content)
        self.assertIn(thumbnail_img, converted)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_quoted_blocks(self) -> None:
content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"
expected = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = ">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
expected = '<blockquote>\n<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = ">* http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
expected = '<blockquote>\n<ul>\n<li><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></li>\n</ul>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview_order(self) -> None:
realm = get_realm("zulip")
content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"
        expected = '<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg</a></p>\n<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&amp;size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\n\n>http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\n\n* http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg\n* https://www.google.com/images/srpr/logo4w.png"
        expected = '<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=thumbnail"></a></div><blockquote>\n<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a></p>\n</blockquote>\n<ul>\n<li><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=thumbnail"></a></div></li>\n<li><div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=thumbnail"></a></div></li>\n</ul>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = "Test 1\n[21136101110_1dde1c1a7e_o.jpg](/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg) \n\nNext image\n[IMG_20161116_023910.jpg](/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg) \n\nAnother screenshot\n[Screenshot-from-2016-06-01-16-22-42.png](/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png)"
content = content.format(realm_id=realm.id)
        expected = '<p>Test 1<br>\n<a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg">21136101110_1dde1c1a7e_o.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg" title="21136101110_1dde1c1a7e_o.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&amp;size=thumbnail"></a></div><p>Next image<br>\n<a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg">IMG_20161116_023910.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg" title="IMG_20161116_023910.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&amp;size=thumbnail"></a></div><p>Another screenshot<br>\n<a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png">Screenshot-from-2016-06-01-16-22-42.png</a></p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png" title="Screenshot-from-2016-06-01-16-22-42.png"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&amp;size=thumbnail"></a></div>'
expected = expected.format(realm_id=realm.id)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_corrected_image_source(self) -> None:
# testing only Wikipedia because linx.li URLs can be expected to expire
content = "https://en.wikipedia.org/wiki/File:Wright_of_Derby,_The_Orrery.jpg"
expected = '<div class="message_inline_image"><a href="https://en.wikipedia.org/wiki/Special:FilePath/File:Wright_of_Derby,_The_Orrery.jpg"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=full" src="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=False)
def test_image_preview_enabled(self) -> None:
ret = image_preview_enabled()
self.assertFalse(ret)
settings.INLINE_IMAGE_PREVIEW = True
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = message.get_realm()
ret = image_preview_enabled()
self.assertTrue(ret)
ret = image_preview_enabled(no_previews=True)
self.assertFalse(ret)
ret = image_preview_enabled(message, realm)
self.assertTrue(ret)
ret = image_preview_enabled(message)
self.assertTrue(ret)
ret = image_preview_enabled(message, realm, no_previews=True)
self.assertFalse(ret)
ret = image_preview_enabled(message, no_previews=True)
self.assertFalse(ret)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_url_embed_preview_enabled(self) -> None:
sender_user_profile = self.example_user("othello")
message = copy.deepcopy(
Message(sender=sender_user_profile, sending_client=get_client("test"))
)
realm = message.get_realm()
realm.inline_url_embed_preview = True # off by default
realm.save(update_fields=["inline_url_embed_preview"])
ret = url_embed_preview_enabled()
self.assertFalse(ret)
settings.INLINE_URL_EMBED_PREVIEW = True
ret = url_embed_preview_enabled()
self.assertTrue(ret)
        ret = url_embed_preview_enabled(no_previews=True)
self.assertFalse(ret)
ret = url_embed_preview_enabled(message, realm)
self.assertTrue(ret)
ret = url_embed_preview_enabled(message)
self.assertTrue(ret)
ret = url_embed_preview_enabled(message, no_previews=True)
self.assertFalse(ret)
def test_inline_dropbox(self) -> None:
msg = "Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG"
image_info = {
"image": "https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG",
"desc": "Shared with Dropbox",
"title": "IMG_0923.JPG",
}
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" title="IMG_0923.JPG"><img src="{get_camo_url("https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?raw=1")}"></a></div>""",
)
msg = "Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl="
image_info = {
"image": "https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png",
"desc": "Shared with Dropbox",
"title": "Saves",
}
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" title="Saves"><img src="{get_camo_url("https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png")}"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>""",
)
def test_inline_dropbox_preview(self) -> None:
# Test photo album previews
msg = "https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5"
image_info = {
"image": "https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0",
"desc": "Shared with Dropbox",
"title": "1 photo",
}
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" title="1 photo"><img src="{get_camo_url("https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0")}"></a></div>""",
)
def test_inline_dropbox_negative(self) -> None:
# Make sure we're not overzealous in our conversion:
msg = "Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png"
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
            '<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&amp;size=thumbnail"></a></div>',
)
def test_inline_dropbox_bad(self) -> None:
# Don't fail on bad dropbox links
msg = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>',
)
def test_inline_github_preview(self) -> None:
        # Test GitHub image previews
msg = "Test: https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Test: <a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png">https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png</a></p>\n<div class="message_inline_image"><a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=full" src="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=thumbnail"></a></div>',
)
msg = "Test: https://developer.github.com/assets/images/hero-circuit-bg.png"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Test: <a href="https://developer.github.com/assets/images/hero-circuit-bg.png">https://developer.github.com/assets/images/hero-circuit-bg.png</a></p>\n<div class="message_inline_image"><a href="https://developer.github.com/assets/images/hero-circuit-bg.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=full" src="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=thumbnail"></a></div>',
)
def test_inline_youtube_preview(self) -> None:
# Test YouTube URLs in spoilers
msg = """\n```spoiler Check out this PyCon video\nhttps://www.youtube.com/watch?v=0c46YHS3RY8\n```"""
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<div class="spoiler-block"><div class="spoiler-header">\n<p>Check out this PyCon video</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div></div></div>""",
)
# Test YouTube URLs in normal messages.
msg = "[YouTube link](https://www.youtube.com/watch?v=0c46YHS3RY8)"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">YouTube link</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div>""",
)
msg = "https://www.youtube.com/watch?v=0c46YHS3RY8\n\nSample text\n\nhttps://www.youtube.com/watch?v=lXFO2ULktEI"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div><p>Sample text</p>\n<p><a href="https://www.youtube.com/watch?v=lXFO2ULktEI">https://www.youtube.com/watch?v=lXFO2ULktEI</a></p>\n<div class="youtube-video message_inline_image"><a data-id="lXFO2ULktEI" href="https://www.youtube.com/watch?v=lXFO2ULktEI"><img src="{get_camo_url("https://i.ytimg.com/vi/lXFO2ULktEI/default.jpg")}"></a></div>""",
)
def test_twitter_id_extraction(self) -> None:
self.assertEqual(
get_tweet_id("http://twitter.com/#!/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/statuses/409030735191097344"),
"409030735191097344",
)
self.assertEqual(get_tweet_id("https://twitter.com/wdaher/status/1017581858"), "1017581858")
self.assertEqual(
get_tweet_id("https://twitter.com/wdaher/status/1017581858/"), "1017581858"
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/photo/1"),
"410766290349879296",
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/"),
"410766290349879296",
)
def test_inline_interesting_links(self) -> None:
def make_link(url: str) -> str:
return f'<a href="{url}">{url}</a>'
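        # e.g. make_link("http://x") -> '<a href="http://x">http://x</a>'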
normal_tweet_html = (
'<a href="https://twitter.com/Twitter"'
">@Twitter</a> "
"meets @seepicturely at #tcdisrupt cc."
'<a href="https://twitter.com/boscomonkey"'
">@boscomonkey</a> "
'<a href="https://twitter.com/episod"'
">@episod</a> "
'<a href="http://t.co/6J2EgYM"'
">http://instagr.am/p/MuW67/</a>"
)
mention_in_link_tweet_html = """<a href="http://t.co/@foo">http://foo.com</a>"""
media_tweet_html = (
'<a href="http://t.co/xo7pAhK6n3">'
"http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>"
)
emoji_in_tweet_html = """Zulip is <span aria-label=\"100\" class="emoji emoji-1f4af" role=\"img\" title="100">:100:</span>% open-source!"""
def make_inline_twitter_preview(url: str, tweet_html: str, image_html: str = "") -> str:
## As of right now, all previews are mocked to be the exact same tweet
return (
'<div class="inline-preview-twitter">'
'<div class="twitter-tweet">'
f'<a href="{url}">'
'<img class="twitter-avatar"'
' src="https://external-content.zulipcdn.net/external_content/1f7cd2436976d410eab8189ebceda87ae0b34ead/687474703a2f2f7062732e7477696d672e63'
"6f6d2f70726f66696c655f696d616765732f313338303931323137332f53637265656e5f73686f745f323031312d30362d30335f61745f372e33352e33"
'365f504d5f6e6f726d616c2e706e67">'
"</a>"
f"<p>{tweet_html}</p>"
"<span>- Eoin McMillan (@imeoin)</span>"
f"{image_html}"
"</div>"
"</div>"
)
msg = "http://www.twitter.com"
converted = markdown_convert_wrapper(msg)
self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com")))
msg = "http://www.twitter.com/wdaher/"
converted = markdown_convert_wrapper(msg)
self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/")))
msg = "http://www.twitter.com/wdaher/status/3"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/status/3"))
)
# id too long
msg = "http://www.twitter.com/wdaher/status/2879779692873154569"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>".format(
make_link("http://www.twitter.com/wdaher/status/2879779692873154569")
),
)
# id too large (i.e. tweet doesn't exist)
msg = "http://www.twitter.com/wdaher/status/999999999999999999"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>".format(
make_link("http://www.twitter.com/wdaher/status/999999999999999999")
),
)
msg = "http://www.twitter.com/wdaher/status/287977969287315456"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://www.twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
msg = "https://www.twitter.com/wdaher/status/287977969287315456"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("https://www.twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"https://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
msg = "http://twitter.com/wdaher/status/287977969287315456"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
# Repeated links will only be converted once
msg = (
"http://twitter.com/wdaher/status/287977969287315456 "
"http://twitter.com/wdaher/status/287977969287315457 "
"http://twitter.com/wdaher/status/287977969287315457 "
"http://twitter.com/wdaher/status/287977969287315457"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{} {} {} {}</p>\n{}{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
),
),
)
# A max of 3 will be converted
msg = (
"http://twitter.com/wdaher/status/287977969287315456 "
"http://twitter.com/wdaher/status/287977969287315457 "
"https://twitter.com/wdaher/status/287977969287315456 "
"http://twitter.com/wdaher/status/287977969287315460"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{} {} {} {}</p>\n{}{}{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_link("https://twitter.com/wdaher/status/287977969287315456"),
make_link("http://twitter.com/wdaher/status/287977969287315460"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
),
make_inline_twitter_preview(
"https://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
# Test smart in-place inlining behavior:
msg = (
"Paragraph 1: http://twitter.com/wdaher/status/287977969287315456\n\n"
"Paragraph 2\n\n"
"Paragraph 3: http://twitter.com/wdaher/status/287977969287315457"
)
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>Paragraph 1: {}</p>\n{}<p>Paragraph 2</p>\n<p>Paragraph 3: {}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
make_link("http://twitter.com/wdaher/status/287977969287315457"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
),
),
)
# Tweet has a mention in a URL, only the URL is linked
msg = "http://twitter.com/wdaher/status/287977969287315458"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315458"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315458",
mention_in_link_tweet_html,
),
),
)
# Tweet with an image
msg = "http://twitter.com/wdaher/status/287977969287315459"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315459"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315459",
media_tweet_html,
(
'<div class="twitter-image">'
'<a href="http://t.co/xo7pAhK6n3">'
f"""<img src="{get_camo_url("https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small")}">"""
"</a>"
"</div>"
),
),
),
)
msg = "http://twitter.com/wdaher/status/287977969287315460"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>{}</p>\n{}".format(
make_link("http://twitter.com/wdaher/status/287977969287315460"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315460", emoji_in_tweet_html
),
),
)
# Test Twitter previews in spoiler tags.
msg = "```spoiler secret tweet\nTweet: http://twitter.com/wdaher/status/287977969287315456\n```"
converted = markdown_convert_wrapper(msg)
rendered_spoiler = '<div class="spoiler-block"><div class="spoiler-header">\n<p>secret tweet</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p>Tweet: {}</p>\n{}</div></div>'
self.assertEqual(
converted,
rendered_spoiler.format(
make_link("http://twitter.com/wdaher/status/287977969287315456"),
make_inline_twitter_preview(
"http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
),
),
)
def test_fetch_tweet_data_settings_validation(self) -> None:
with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
self.assertIs(None, fetch_tweet_data("287977969287315459"))
def test_content_has_emoji(self) -> None:
self.assertFalse(content_has_emoji_syntax("boring"))
self.assertFalse(content_has_emoji_syntax("hello: world"))
self.assertFalse(content_has_emoji_syntax(":foobar"))
self.assertFalse(content_has_emoji_syntax("::: hello :::"))
self.assertTrue(content_has_emoji_syntax("foo :whatever:"))
self.assertTrue(content_has_emoji_syntax("\n:whatever:"))
self.assertTrue(content_has_emoji_syntax(":smile: ::::::"))
def test_realm_emoji(self) -> None:
def emoji_img(name: str, file_name: str, realm_id: int) -> str:
return '<img alt="{}" class="emoji" src="{}" title="{}">'.format(
name, get_emoji_url(file_name, realm_id), name[1:-1].replace("_", " ")
)
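        # For example, emoji_img(":green_tick:", file_name, realm_id) titles
        # the image "green tick" (colons stripped, underscores to spaces).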
realm = get_realm("zulip")
# Needs to mock an actual message because that's how Markdown obtains the realm
msg = Message(sender=self.example_user("hamlet"))
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
realm_emoji = RealmEmoji.objects.filter(
realm=realm, name="green_tick", deactivated=False
).get()
self.assertEqual(
converted.rendered_content,
"<p>{}</p>".format(emoji_img(":green_tick:", realm_emoji.file_name, realm.id)),
)
# Deactivate realm emoji.
do_remove_realm_emoji(realm, "green_tick")
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
def test_deactivated_realm_emoji(self) -> None:
# Deactivate realm emoji.
realm = get_realm("zulip")
do_remove_realm_emoji(realm, "green_tick")
msg = Message(sender=self.example_user("hamlet"))
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
def test_unicode_emoji(self) -> None:
msg = "\u2615" # ☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
)
msg = "\u2615\u2615" # ☕☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
)
def test_no_translate_emoticons_if_off(self) -> None:
user_profile = self.example_user("othello")
do_change_user_setting(user_profile, "translate_emoticons", False)
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = ":)"
expected = "<p>:)</p>"
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
def test_same_markup(self) -> None:
msg = "\u2615" # ☕
unicode_converted = markdown_convert_wrapper(msg)
msg = ":coffee:" # ☕☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(converted, unicode_converted)
def test_links_in_topic_name(self) -> None:
realm = get_realm("zulip")
msg = Message(sender=self.example_user("othello"))
msg.set_topic_name("https://google.com/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[{"url": "https://google.com/hello-world", "text": "https://google.com/hello-world"}],
)
msg.set_topic_name("http://google.com/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[{"url": "http://google.com/hello-world", "text": "http://google.com/hello-world"}],
)
msg.set_topic_name("Without scheme google.com/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[{"url": "https://google.com/hello-world", "text": "google.com/hello-world"}],
)
msg.set_topic_name("Without scheme random.words/hello-world")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, [])
msg.set_topic_name(
"Try out http://ftp.debian.org, https://google.com/ and https://google.in/."
)
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[
{"url": "http://ftp.debian.org", "text": "http://ftp.debian.org"},
{"url": "https://google.com/", "text": "https://google.com/"},
{"url": "https://google.in/", "text": "https://google.in/"},
],
)
# test order for links without scheme
msg.set_topic_name("google.in google.com")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[
{"url": "https://google.in", "text": "google.in"},
{"url": "https://google.com", "text": "google.com"},
],
)
def test_realm_patterns(self) -> None:
realm = get_realm("zulip")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
msg = Message(sender=self.example_user("othello"))
msg.set_topic_name("#444")
flush_per_request_caches()
content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
converted = markdown_convert(content, message_realm=realm, message=msg)
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted.rendered_content,
'<p>We should fix <a href="https://trac.example.com/ticket/224">#224</a> and <a href="https://trac.example.com/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.example.com/ticket/16">trac #15</a> today.</p>',
)
self.assertEqual(
converted_topic, [{"url": "https://trac.example.com/ticket/444", "text": "#444"}]
)
msg.set_topic_name("#444 https://google.com")
converted_topic = topic_links(realm.id, msg.topic_name())
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/ticket/444", "text": "#444"},
{"url": "https://google.com", "text": "https://google.com"},
],
)
RealmFilter(
realm=realm,
pattern=r"#(?P<id>[a-zA-Z]+-[0-9]+)",
url_format_string=r"https://trac.example.com/ticket/%(id)s",
).save()
msg = Message(sender=self.example_user("hamlet"))
content = "#ZUL-123 was fixed and code was deployed to production, also #zul-321 was deployed to staging"
converted = markdown_convert(content, message_realm=realm, message=msg)
self.assertEqual(
converted.rendered_content,
'<p><a href="https://trac.example.com/ticket/ZUL-123">#ZUL-123</a> was fixed and code was deployed to production, also <a href="https://trac.example.com/ticket/zul-321">#zul-321</a> was deployed to staging</p>',
)
def assert_conversion(content: str, should_have_converted: bool = True) -> None:
converted = markdown_convert(content, message_realm=realm, message=msg).rendered_content
converted_topic = topic_links(realm.id, content)
if should_have_converted:
self.assertTrue("https://trac.example.com" in converted)
self.assert_length(converted_topic, 1)
self.assertEqual(
converted_topic[0],
{"url": "https://trac.example.com/ticket/123", "text": "#123"},
)
else:
self.assertTrue("https://trac.example.com" not in converted)
self.assert_length(converted_topic, 0)
assert_conversion("Hello #123 World")
assert_conversion("Hello #123World", False)
assert_conversion("Hello#123 World", False)
assert_conversion("Hello#123World", False)
        # Ideally, these should be converted too, but Markdown's word
        # boundary detection does not yet work correctly for languages
        # that don't separate words with whitespace.
assert_conversion("チケットは#123です", False)
assert_conversion("チケットは #123です", False)
assert_conversion("チケットは#123 です", False)
assert_conversion("チケットは #123 です")
assert_conversion("(#123)")
assert_conversion("#123>")
assert_conversion('"#123"')
assert_conversion("#123@")
assert_conversion(")#123(", False)
assert_conversion("##123", False)
# test nested realm patterns should avoid double matching
RealmFilter(
realm=realm,
pattern=r"hello#(?P<id>[0-9]+)",
url_format_string=r"https://trac.example.com/hello/%(id)s",
).save()
converted_topic = topic_links(realm.id, "hello#123 #234")
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/hello/123", "text": "hello#123"},
{"url": "https://trac.example.com/ticket/234", "text": "#234"},
],
)
# test correct order when realm pattern and normal links are both present.
converted_topic = topic_links(realm.id, "#234 https://google.com")
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/ticket/234", "text": "#234"},
{"url": "https://google.com", "text": "https://google.com"},
],
)
def test_multiple_matching_realm_patterns(self) -> None:
realm = get_realm("zulip")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier_1 = RealmFilter(
realm=realm,
pattern=r"(?P<id>ABC\-[0-9]+)(?![A-Z0-9-])",
url_format_string=url_format_string,
)
linkifier_1.save()
self.assertEqual(
linkifier_1.__str__(),
r"<RealmFilter(zulip): (?P<id>ABC\-[0-9]+)(?![A-Z0-9-])"
" https://trac.example.com/ticket/%(id)s>",
)
url_format_string = r"https://other-trac.example.com/ticket/%(id)s"
linkifier_2 = RealmFilter(
realm=realm,
pattern=r"(?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])",
url_format_string=url_format_string,
)
linkifier_2.save()
self.assertEqual(
linkifier_2.__str__(),
r"<RealmFilter(zulip): (?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])"
" https://other-trac.example.com/ticket/%(id)s>",
)
msg = Message(sender=self.example_user("othello"))
msg.set_topic_name("ABC-123")
flush_per_request_caches()
content = (
"We should fix ABC-123 or [trac ABC-123](https://trac.example.com/ticket/16) today."
)
converted = markdown_convert(content, message_realm=realm, message=msg)
converted_topic = topic_links(realm.id, msg.topic_name())
        # The second linkifier (saved later) is ignored, because the content
        # is marked as an AtomicString after the first conversion. There is no
        # easy way to support both linkifiers without running into an infinite
        # loop, so the second one is skipped here.
self.assertEqual(
converted.rendered_content,
'<p>We should fix <a href="https://trac.example.com/ticket/ABC-123">ABC-123</a> or <a href="https://trac.example.com/ticket/16">trac ABC-123</a> today.</p>',
)
# Both the links should be generated in topics.
self.assertEqual(
converted_topic,
[
{"url": "https://trac.example.com/ticket/ABC-123", "text": "ABC-123"},
{"url": "https://other-trac.example.com/ticket/ABC-123", "text": "ABC-123"},
],
)
def test_flush_linkifier(self) -> None:
realm = get_realm("zulip")
def flush() -> None:
"""
flush_linkifiers is a post-save hook, so calling it
directly for testing is kind of awkward
"""
class Instance:
realm_id: Optional[int] = None
instance = Instance()
instance.realm_id = realm.id
flush_linkifiers(sender=RealmFilter, instance=cast(RealmFilter, instance))
def save_new_linkifier() -> None:
linkifier = RealmFilter(realm=realm, pattern=r"whatever", url_format_string="whatever")
linkifier.save()
# start fresh for our realm
flush()
self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
# call this just for side effects of populating the cache
linkifiers_for_realm(realm.id)
self.assertTrue(realm_in_local_linkifiers_cache(realm.id))
# Saving a new RealmFilter should have the side effect of
# flushing the cache.
save_new_linkifier()
self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
# and flush it one more time, to make sure we don't get a KeyError
flush()
self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
def test_realm_patterns_negative(self) -> None:
realm = get_realm("zulip")
RealmFilter(
realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=r"https://trac.example.com/ticket/%(id)s",
).save()
boring_msg = Message(sender=self.example_user("othello"))
boring_msg.set_topic_name("no match here")
converted_boring_topic = topic_links(realm.id, boring_msg.topic_name())
self.assertEqual(converted_boring_topic, [])
def test_is_status_message(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "/me makes a list\n* one\n* two"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me makes a list</p>\n<ul>\n<li>one</li>\n<li>two</li>\n</ul>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
content = "/me takes a walk"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me takes a walk</p>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
content = "/me writes a second line\nline"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me writes a second line<br>\nline</p>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
def test_alert_words(self) -> None:
user_profile = self.example_user("othello")
do_add_alert_words(user_profile, ["ALERTWORD", "scaryword"])
msg = Message(sender=user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
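        # The realm-wide automaton (an Aho-Corasick matcher) lets a single
        # pass over the content find every user's alert words at once.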
content = "We have an ALERTWORD day today!"
rendering_result = render(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>We have an ALERTWORD day today!</p>"
)
self.assertEqual(rendering_result.user_ids_with_alert_words, {user_profile.id})
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "We have a NOTHINGWORD day today!"
rendering_result = render(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>We have a NOTHINGWORD day today!</p>"
)
self.assertEqual(rendering_result.user_ids_with_alert_words, set())
def test_alert_words_returns_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["how"],
"cordelia": ["this possible"],
"iago": ["hello"],
"prospero": ["hello"],
"othello": ["how are you"],
"aaron": ["hey"],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = "hello how is this possible how are you doing today"
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {
user_profiles["hamlet"].id,
user_profiles["cordelia"].id,
user_profiles["iago"].id,
user_profiles["prospero"].id,
user_profiles["othello"].id,
}
# All users except aaron have their alert word appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_1(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["provisioning", "Prod deployment"],
"cordelia": ["test", "Prod"],
"iago": ["prod"],
"prospero": ["deployment"],
"othello": ["last"],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """Hello, everyone. Prod deployment has been completed
And this is a new line
to test how Markdown converts this into a line-ending-split array
and this is a new line
last"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {
user_profiles["hamlet"].id,
user_profiles["cordelia"].id,
user_profiles["iago"].id,
user_profiles["prospero"].id,
user_profiles["othello"].id,
}
# All users have their alert word appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_in_french(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["réglementaire", "une politique", "une merveille"],
"cordelia": ["énormément", "Prod"],
"iago": ["prod"],
"prospero": ["deployment"],
"othello": ["last"],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """This is to test out alert words work in languages with accented characters too
bonjour est (énormément) ce a quoi ressemble le français
et j'espère qu'il n'y n' réglementaire a pas de mots d'alerte dans ce texte français
"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {user_profiles["hamlet"].id, user_profiles["cordelia"].id}
# Only hamlet and cordelia have their alert-words appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_empty_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": [],
"cordelia": [],
"iago": [],
"prospero": [],
"othello": [],
"aaron": [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """hello how is this possible how are you doing today
This is to test that no user_ids who have alert words are participating
in sending of the message
"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = set()
# None of the users have their alert-words appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def get_mock_alert_words(self, num_words: int, word_length: int) -> List[str]:
alert_words = ["x" * word_length] * num_words # type List[str]
return alert_words
def test_alert_words_with_empty_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": [],
"cordelia": [],
"iago": [],
"othello": [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """This is to test a empty alert words i.e. no user has any alert-words set"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = set()
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": ["issue124"],
"cordelia": self.get_mock_alert_words(500, 10),
"iago": self.get_mock_alert_words(500, 10),
"othello": self.get_mock_alert_words(500, 10),
}
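# 500 ten-character words per user stress the realm-wide matcher
# (judging by its name, get_alert_word_automaton likely builds an
# Aho-Corasick-style automaton; an assumption, not confirmed here).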
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """The code above will print 10 random values of numbers between 1 and 100.
The second line, for x in range(10), determines how many values will be printed (when you use
range(x), the number that you use in place of x will be the amount of values that you'll have
printed. if you want 20 values, use range(20). use range(5) if you only want 5 values returned,
etc.). I was talking about the issue124 on github. Then the third line: print random.randint(1,101) will automatically select a random integer
between 1 and 100 for you. The process is fairly simple
"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = {user_profiles["hamlet"].id}
# Only hamlet has alert-word 'issue124' present in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_default_code_block_language(self) -> None:
realm = get_realm("zulip")
self.assertEqual(realm.default_code_block_language, None)
text = "```{}\nconsole.log('Hello World');\n```\n"
# Render without default language
msg_with_js = markdown_convert_wrapper(text.format("js"))
msg_with_python = markdown_convert_wrapper(text.format("python"))
msg_without_language = markdown_convert_wrapper(text.format(""))
msg_with_quote = markdown_convert_wrapper(text.format("quote"))
msg_with_math = markdown_convert_wrapper(text.format("math"))
msg_with_none = markdown_convert_wrapper(text.format("none"))
# Render with default=javascript
do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
msg_without_language_default_js = markdown_convert_wrapper(text.format(""))
msg_with_python_default_js = markdown_convert_wrapper(text.format("python"))
# Render with default=python
do_set_realm_property(realm, "default_code_block_language", "python", acting_user=None)
msg_without_language_default_py = markdown_convert_wrapper(text.format(""))
msg_with_none_default_py = markdown_convert_wrapper(text.format("none"))
# Render with default=quote
do_set_realm_property(realm, "default_code_block_language", "quote", acting_user=None)
msg_without_language_default_quote = markdown_convert_wrapper(text.format(""))
# Render with default=math
do_set_realm_property(realm, "default_code_block_language", "math", acting_user=None)
msg_without_language_default_math = markdown_convert_wrapper(text.format(""))
# Render without default language
do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
msg_without_language_final = markdown_convert_wrapper(text.format(""))
self.assertTrue(msg_with_js == msg_without_language_default_js)
self.assertTrue(
msg_with_python == msg_with_python_default_js == msg_without_language_default_py
)
self.assertTrue(msg_with_quote == msg_without_language_default_quote)
self.assertTrue(msg_with_math == msg_without_language_default_math)
self.assertTrue(msg_without_language == msg_without_language_final)
self.assertTrue(msg_with_none == msg_with_none_default_py)
# Test checking inside nested quotes
nested_text = "````quote\n\n{}\n\n{}````".format(text.format("js"), text.format(""))
do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
rendered = markdown_convert_wrapper(nested_text)
with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
self.assertTrue(with_language == without_language)
do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
rendered = markdown_convert_wrapper(nested_text)
with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
self.assertFalse(with_language == without_language)
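# Each re.findall() above captures the first line of every <pre> block:
# with a realm default set, the unlabeled inner block inherits the default
# language and renders identically to the explicitly labeled one.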
def test_mention_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**all** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@all" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_everyone(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**everyone** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@everyone" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_stream(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**stream** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@stream" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_at_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@all test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@all test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_at_everyone(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@everyone test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@everyone test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_word_starting_with_at_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "test @alleycat.com test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>test @alleycat.com test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_at_normal_user(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@aaron test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@aaron test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_single(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
content = f"@**|{user_id}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
def test_mention_silent(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@_**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention silent" '
f'data-user-id="{user_id}">'
"King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_silent_wildcard_mention(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
wildcards = ["all", "everyone", "stream"]
for wildcard in wildcards:
content = f"@_**{wildcard}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
f'<p><span class="user-mention silent" data-user-id="*">{wildcard}</span></p>',
)
self.assertFalse(rendering_result.mentions_wildcard)
def test_mention_invalid_followed_by_valid(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@**Invalid user** and @**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p>@<strong>Invalid user</strong> and <span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
def test_invalid_mention_not_uses_valid_mention_data(self) -> None:
sender_user_profile = self.example_user("othello")
hamlet = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Even though King Hamlet is present in the mention data (it was
# fetched for the first mention), the second mention is invalid (it
# reuses hamlet's id with the wrong name), so it must not be able
# to use that data to create a valid mention.
content = f"@**King Hamlet|{hamlet.id}** and @**aaron|{hamlet.id}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
f'<p><span class="user-mention" data-user-id="{hamlet.id}">'
f"@King Hamlet</span> and @<strong>aaron|{hamlet.id}</strong></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id})
def test_silent_mention_invalid_followed_by_valid(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@_**Invalid user** and @_**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p>@_<strong>Invalid user</strong> and <span class="user-mention silent" '
f'data-user-id="{user_id}">'
"King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
content = f"@_**|123456789** and @_**|{user_id}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>@_<strong>|123456789</strong> and "
'<span class="user-mention silent" '
f'data-user-id="{user_id}">'
"King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_possible_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str], has_wildcards: bool = False) -> None:
self.assertEqual(possible_mentions(content), (names, has_wildcards))
aaron = self.example_user("aaron")
assert_mentions("", set())
assert_mentions("boring", set())
assert_mentions("@**all**", set(), True)
assert_mentions("smush@**steve**smush", set())
assert_mentions(
f"Hello @**King Hamlet**, @**|{aaron.id}** and @**Cordelia, Lear's daughter**\n@**Foo van Barson|1234** @**all**",
{"King Hamlet", f"|{aaron.id}", "Cordelia, Lear's daughter", "Foo van Barson|1234"},
True,
)
def test_mention_multiple(self) -> None:
sender_user_profile = self.example_user("othello")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "@**King Hamlet** and @**Cordelia, Lear's daughter**, check this out"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>"
'<span class="user-mention" '
f'data-user-id="{hamlet.id}">@King Hamlet</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
"check this out</p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})
def test_mention_in_quotes(self) -> None:
othello = self.example_user("othello")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
msg = Message(sender=othello, sending_client=get_client("test"))
content = "> @**King Hamlet** and @**Othello, the Moor of Venice**\n\n @**King Hamlet** and @**Cordelia, Lear's daughter**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<blockquote>\n<p>"
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
" and "
f'<span class="user-mention silent" data-user-id="{othello.id}">Othello, the Moor of Venice</span>'
"</p>\n</blockquote>\n"
"<p>"
f'<span class="user-mention" data-user-id="{hamlet.id}">@King Hamlet</span>'
" and "
f'<span class="user-mention" data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>'
"</p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})
# Both fenced quote and > quote should be identical for both silent and regular syntax.
expected = (
"<blockquote>\n<p>"
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
"</p>\n</blockquote>"
)
content = "```quote\n@**King Hamlet**\n```"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
content = "> @**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
content = "```quote\n@_**King Hamlet**\n```"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
content = "> @_**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_wildcard_mention_in_quotes(self) -> None:
user_profile = self.example_user("othello")
message = Message(sender=user_profile, sending_client=get_client("test"))
def assert_silent_mention(content: str, wildcard: str) -> None:
expected = (
"<blockquote>\n<p>"
f'<span class="user-mention silent" data-user-id="*">{wildcard}</span>'
"</p>\n</blockquote>"
)
rendering_result = render_markdown(message, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertFalse(rendering_result.mentions_wildcard)
wildcards = ["all", "everyone", "stream"]
for wildcard in wildcards:
assert_silent_mention(f"> @**{wildcard}**", wildcard)
assert_silent_mention(f"> @_**{wildcard}**", wildcard)
assert_silent_mention(f"```quote\n@**{wildcard}**\n```", wildcard)
assert_silent_mention(f"```quote\n@_**{wildcard}**\n```", wildcard)
def test_mention_duplicate_full_name(self) -> None:
realm = get_realm("zulip")
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password="whatever",
realm=realm,
full_name=full_name,
)
sender_user_profile = self.example_user("othello")
twin1 = make_user("twin1@example.com", "Mark Twin")
twin2 = make_user("twin2@example.com", "Mark Twin")
cordelia = self.example_user("cordelia")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = f"@**Mark Twin|{twin1.id}**, @**Mark Twin|{twin2.id}** and @**Cordelia, Lear's daughter**, hi."
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>"
'<span class="user-mention" '
f'data-user-id="{twin1.id}">@Mark Twin</span>, '
'<span class="user-mention" '
f'data-user-id="{twin2.id}">@Mark Twin</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
"hi.</p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {twin1.id, twin2.id, cordelia.id})
def test_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @**Nonexistent User**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<strong>Nonexistent User</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_user_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user that potentially interferes with the pattern.
test_user = create_user(
email="atomic@example.com",
password="whatever",
realm=realm,
full_name="Atomic #123",
)
content = "@**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{test_user.id}">'
"@Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {test_user.id})
content = "@_**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention silent" '
f'data-user-id="{test_user.id}">'
"Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
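# The linkifier pattern (#123) must not fire inside the rendered mention
# text: the substituted full name is treated as an atomic string, which
# is what this test (and the group/stream/topic variants below) verify.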
def create_user_group_for_test(self, user_group_name: str) -> UserGroup:
othello = self.example_user("othello")
return create_user_group(user_group_name, [othello], get_realm("zulip"))
def test_user_group_mention_single(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
user_group = self.create_user_group_for_test("support")
content = "@**King Hamlet** @*support*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_invalid_user_group_followed_by_valid_mention_single(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
user_group = self.create_user_group_for_test("support")
content = "@**King Hamlet** @*Invalid user group* @*support*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
"@<em>Invalid user group</em> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_user_group_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user("hamlet")
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user-group that potentially interferes with the pattern.
user_id = user_profile.id
user_group = self.create_user_group_for_test("support #123")
content = "@**King Hamlet** @*support #123*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_possible_user_group_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str]) -> None:
self.assertEqual(possible_user_group_mentions(content), names)
assert_mentions("", set())
assert_mentions("boring", set())
assert_mentions("@**all**", set())
assert_mentions("smush@*steve*smush", set())
assert_mentions(
"@*support* Hello @**King Hamlet** and @**Cordelia, Lear's daughter**\n"
"@**Foo van Barson** @**all**",
{"support"},
)
assert_mentions(
"Attention @*support*, @*frontend* and @*backend*\ngroups.",
{"support", "frontend", "backend"},
)
def test_user_group_mention_multiple(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
support = self.create_user_group_for_test("support")
backend = self.create_user_group_for_test("backend")
content = "@*support* and @*backend*, check this out"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>"
'<span class="user-group-mention" '
f'data-user-group-id="{support.id}">'
"@support</span> "
"and "
'<span class="user-group-mention" '
f'data-user-group-id="{backend.id}">'
"@backend</span>, "
"check this out"
"</p>",
)
self.assertEqual(rendering_result.mentions_user_group_ids, {support.id, backend.id})
def test_user_group_mention_edit(self) -> None:
sender_user_profile = self.example_user("hamlet")
user_profile = self.example_user("othello")
self.create_user_group_for_test("support")
self.login("hamlet")
msg_id = self.send_stream_message(
sender_user_profile, "Denmark", topic_name="editing", content="test"
)
def update_message_and_check_flag(content: str, mentioned: bool) -> None:
result = self.client_patch(
"/json/messages/" + str(msg_id),
{
"message_id": msg_id,
"content": content,
},
)
self.assert_json_success(result)
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=msg_id,
)
if mentioned:
self.assertIn("mentioned", um.flags_list())
else:
self.assertNotIn("mentioned", um.flags_list())
update_message_and_check_flag("@*support*", True)
update_message_and_check_flag("@*support-invalid* edited", False)
update_message_and_check_flag("@*support* edited", True)
update_message_and_check_flag("edited", False)
update_message_and_check_flag("@*support*", True)
update_message_and_check_flag("@_*support*", False)
def test_user_group_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @*Nonexistent group*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<em>Nonexistent group</em></p>"
)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
def test_user_group_silent_mention(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
support = self.create_user_group_for_test("support")
content = "We'll add you to @_*support* user group."
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>We'll add you to "
f'<span class="user-group-mention silent" data-user-group-id="{support.id}">support</span>'
" user group.</p>",
)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
def test_user_group_mention_in_quotes(self) -> None:
user_profile = self.example_user("othello")
message = Message(sender=user_profile, sending_client=get_client("test"))
backend = self.create_user_group_for_test("backend")
def assert_silent_mention(content: str) -> None:
expected = (
"<blockquote>\n<p>"
f'<span class="user-group-mention silent" data-user-group-id="{backend.id}">backend</span>'
"</p>\n</blockquote>"
)
rendering_result = render_markdown(message, content)
self.assertEqual(rendering_result.rendered_content, expected)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
assert_silent_mention("> @*backend*")
assert_silent_mention("> @_*backend*")
assert_silent_mention("```quote\n@*backend*\n```")
assert_silent_mention("```quote\n@_*backend*\n```")
def test_stream_single(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
d=denmark,
),
)
def test_invalid_stream_followed_by_valid_mention(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Invalid** and #**Denmark**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p>#<strong>Invalid</strong> and <a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
d=denmark,
),
)
def test_stream_multiple(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = get_realm("zulip")
denmark = get_stream("Denmark", realm)
scotland = get_stream("Scotland", realm)
content = "Look to #**Denmark** and #**Scotland**, there something"
self.assertEqual(
render_markdown(msg, content).rendered_content,
"<p>Look to "
'<a class="stream" '
'data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-Denmark">#{denmark.name}</a> and '
'<a class="stream" '
'data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-Scotland">#{scotland.name}</a>, '
"there something</p>".format(denmark=denmark, scotland=scotland),
)
def test_stream_case_sensitivity(self) -> None:
realm = get_realm("zulip")
case_sens = Stream.objects.create(name="CaseSens", realm=realm)
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**CaseSens**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="/#narrow/stream/{s.id}-{s.name}">#{s.name}</a></p>'.format(
s=case_sens,
),
)
def test_stream_case_sensitivity_nonmatching(self) -> None:
"""#StreamName requires the stream be spelled with the correct case
currently. If we change that in the future, we'll need to change this
test."""
realm = get_realm("zulip")
Stream.objects.create(name="CaseSens", realm=realm)
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**casesens**"
self.assertEqual(
render_markdown(msg, content).rendered_content, "<p>#<strong>casesens</strong></p>"
)
def test_topic_single(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>some topic**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/some.20topic">#{d.name} > some topic</a></p>'.format(
d=denmark,
),
)
def test_topic_atomic_string(self) -> None:
realm = get_realm("zulip")
# Create a linkifier.
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a topic link that potentially interferes with the pattern.
denmark = get_stream("Denmark", realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>#1234**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/.231234">#{d.name} > #1234</a></p>'.format(
d=denmark,
),
)
def test_topic_multiple(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
scotland = get_stream("Scotland", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "This has two links: #**Denmark>some topic** and #**Scotland>other topic**."
self.assertEqual(
render_markdown(msg, content).rendered_content,
"<p>This has two links: "
'<a class="stream-topic" data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-{denmark.name}/topic/some.20topic">'
"#{denmark.name} > some topic</a>"
" and "
'<a class="stream-topic" data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-{scotland.name}/topic/other.20topic">'
"#{scotland.name} > other topic</a>"
".</p>".format(denmark=denmark, scotland=scotland),
)
def test_possible_stream_names(self) -> None:
content = """#**test here**
This mentions #**Denmark** too.
#**garçon** #**천국** @**Ignore Person**
"""
self.assertEqual(
possible_linked_stream_names(content),
{"test here", "Denmark", "garçon", "천국"},
)
def test_stream_unicode(self) -> None:
realm = get_realm("zulip")
uni = Stream.objects.create(name="привет", realm=realm)
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**привет**"
quoted_name = ".D0.BF.D1.80.D0.B8.D0.B2.D0.B5.D1.82"
href = f"/#narrow/stream/{uni.id}-{quoted_name}"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=uni,
href=href,
),
)
def test_stream_atomic_string(self) -> None:
realm = get_realm("zulip")
# Create a linkifier.
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
str(linkifier),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a stream that potentially interferes with the pattern.
stream = Stream.objects.create(name="Stream #1234", realm=realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Stream #1234**"
href = f"/#narrow/stream/{stream.id}-Stream-.231234"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=stream,
href=href,
),
)
def test_stream_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "There #**Nonexistentstream**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>There #<strong>Nonexistentstream</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_image_preview_title(self) -> None:
msg = "[My favorite image](https://example.com/testimage.png)"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
"<p>"
'<a href="https://example.com/testimage.png">My favorite image</a>'
"</p>\n"
'<div class="message_inline_image">'
'<a href="https://example.com/testimage.png" title="My favorite image">'
'<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=full" src="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=thumbnail">'
"</a>"
"</div>",
)
def test_mit_rendering(self) -> None:
"""Test the Markdown configs for the MIT Zephyr mirroring system;
verifies almost all inline patterns are disabled, but
inline_interesting_links is still enabled"""
msg = "**test**"
realm = get_realm("zephyr")
client = get_client("zephyr_mirror")
message = Message(sending_client=client, sender=self.mit_user("sipbtest"))
converted = markdown_convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted.rendered_content,
"<p>**test**</p>",
)
msg = "* test"
converted = markdown_convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted.rendered_content,
"<p>* test</p>",
)
msg = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
converted = markdown_convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted.rendered_content,
'<p><a href="https://lists.debian.org/debian-ctte/2014/02/msg00173.html">https://lists.debian.org/debian-ctte/2014/02/msg00173.html</a></p>',
)
def test_url_to_a(self) -> None:
url = "javascript://example.com/invalidURL"
converted = url_to_a(db_data=None, url=url, text=url)
self.assertEqual(
converted,
"javascript://example.com/invalidURL",
)
def test_disabled_code_block_processor(self) -> None:
msg = (
"Hello,\n\n"
+ " I am writing this message to test something. I am writing this message to test something."
)
converted = markdown_convert_wrapper(msg)
expected_output = (
"<p>Hello,</p>\n"
+ '<div class="codehilite"><pre><span></span><code>I am writing this message to test something. I am writing this message to test something.\n'
+ "</code></pre></div>"
)
self.assertEqual(converted, expected_output)
realm = do_create_realm(
string_id="code_block_processor_test", name="code_block_processor_test"
)
maybe_update_markdown_engines(realm.id, True)
rendering_result = markdown_convert(msg, message_realm=realm, email_gateway=True)
expected_output = (
"<p>Hello,</p>\n"
+ "<p>I am writing this message to test something. I am writing this message to test something.</p>"
)
self.assertEqual(rendering_result.rendered_content, expected_output)
def test_normal_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://example.com/#settings/"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="http://example.com/#settings/">http://example.com/#settings/</a></p>',
)
def test_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#narrow/stream/999-hello"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="#narrow/stream/999-hello">http://zulip.testserver/#narrow/stream/999-hello</a></p>',
)
def test_relative_link_streams_page(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#streams/all"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="#streams/all">http://zulip.testserver/#streams/all</a></p>',
)
def test_md_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "[hello](http://zulip.testserver/#narrow/stream/999-hello)"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="#narrow/stream/999-hello">hello</a></p>',
)
def test_html_entity_conversion(self) -> None:
msg = """\
Test raw: Hello, ©
Test inline code: `©`
Test fenced code:
```
©
©
```
Test quote:
~~~quote
©
~~~
Test a list:
* ©
* `©`
* ```©```
Test an indented block:
©"""
expected_output = """\
<p>Test raw: Hello, ©<br>
Test inline code: <code>&copy;</code></p>
<p>Test fenced code:</p>
<div class="codehilite"><pre><span></span><code>&copy;
&copy;
</code></pre></div>
<p>Test quote:</p>
<blockquote>
<p>©</p>
</blockquote>
<p>Test a list:</p>
<ul>
<li>©</li>
<li><code>&copy;</code></li>
<li><code>&copy;</code></li>
</ul>
<p>Test an indented block:</p>
<div class="codehilite"><pre><span></span><code>&copy;
</code></pre></div>"""
converted = markdown_convert_wrapper(dedent(msg))
self.assertEqual(converted, dedent(expected_output))
class MarkdownApiTests(ZulipTestCase):
def test_render_message_api(self) -> None:
content = "That is a **bold** statement"
result = self.api_post(
self.example_user("othello"),
"/api/v1/messages/render",
dict(content=content),
)
self.assert_json_success(result)
self.assertEqual(
result.json()["rendered"], "<p>That is a <strong>bold</strong> statement</p>"
)
def test_render_mention_stream_api(self) -> None:
"""Determines whether we're correctly passing the realm context"""
content = "This mentions #**Denmark** and @**King Hamlet**."
result = self.api_post(
self.example_user("othello"),
"/api/v1/messages/render",
dict(content=content),
)
self.assert_json_success(result)
user_id = self.example_user("hamlet").id
stream_id = get_stream("Denmark", get_realm("zulip")).id
self.assertEqual(
result.json()["rendered"],
f'<p>This mentions <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-Denmark">#Denmark</a> and <span class="user-mention" data-user-id="{user_id}">@King Hamlet</span>.</p>',
)
class MarkdownErrorTests(ZulipTestCase):
def test_markdown_error_handling(self) -> None:
with self.simulated_markdown_failure():
with self.assertRaises(MarkdownRenderingException):
markdown_convert_wrapper("")
def test_send_message_errors(self) -> None:
message = "whatever"
with self.simulated_markdown_failure():
# We don't use assertRaisesRegex because it seems to not
# handle i18n properly here on some systems.
with self.assertRaises(JsonableError):
self.send_stream_message(self.example_user("othello"), "Denmark", message)
@override_settings(MAX_MESSAGE_LENGTH=10)
def test_ultra_long_rendering(self) -> None:
"""A rendered message with an ultra-long length (> 100 * MAX_MESSAGE_LENGTH)
throws an exception"""
msg = "mock rendered message\n" * 10 * settings.MAX_MESSAGE_LENGTH
with mock.patch("zerver.lib.markdown.timeout", return_value=msg), mock.patch(
"zerver.lib.markdown.markdown_logger"
):
with self.assertRaises(MarkdownRenderingException):
markdown_convert_wrapper(msg)
def test_curl_code_block_validation(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
processor.run_content_validators = True
markdown_input = [
"``` curl",
"curl {{ api_url }}/v1/register",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
' -d "queue_id=1375801870:2942"',
"```",
]
with self.assertRaises(MarkdownRenderingException):
processor.run(markdown_input)
def test_curl_code_block_without_validation(self) -> None:
processor = SimulatedFencedBlockPreprocessor(Markdown())
markdown_input = [
"``` curl",
"curl {{ api_url }}/v1/register",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
' -d "queue_id=1375801870:2942"',
"```",
]
expected = [
"",
"**curl:curl {{ api_url }}/v1/register",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
' -d "queue_id=1375801870:2942"**',
"",
"",
]
result = processor.run(markdown_input)
self.assertEqual(result, expected)
|
import nix_ffi
import os
import pytest
def get_projects_to_test():
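"""Collect one test case per (translator, example source) pair.
Fixtures come from each translator's generateUnitTestsForProjects;
translators of type 'all' are skipped, and the rest are flattened
into the dicts consumed by the parametrized tests below."""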
tests = nix_ffi.eval(
'subsystems.allTranslators',
wrapper_code = '''
{result}: let
lib = (import <nixpkgs> {}).lib;
l = lib // builtins;
in
l.flatten (
l.map
(
translator:
l.map
(source: {
source = l.toString source;
translator = translator.name;
inherit (translator) subsystem type;
})
(translator.generateUnitTestsForProjects or [])
)
result
)
''',
)
result = []
for test in tests:
if test['type'] == 'all':
continue
result.append(dict(
project = dict(
name="test",
relPath="",
translator=test['translator'],
subsystemInfo={},
),
translator=test['translator'],
source = test['source'],
subsystem = test['subsystem'],
type = test['type'],
))
return result
projects = get_projects_to_test()
def check_format_dependencies(dependencies):
assert isinstance(dependencies, list)
for dep in dependencies:
assert set(dep.keys()) == {'name', 'version'}
assert isinstance(dep['name'], str)
assert len(dep['name']) > 0
assert isinstance(dep['version'], str)
assert len(dep['version']) > 0
def check_format_sourceSpec(sourceSpec):
assert isinstance(sourceSpec, dict)
assert 'type' in sourceSpec
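# The parametrized tests below repeat one nix_ffi.eval() call shape and
# only vary the attribute read from result.inputs. A hypothetical helper
# like this sketch could factor that out (the name translate_input and
# its use are illustrative, not part of the original suite):
def translate_input(p, attr_path):
    # Evaluate a single attribute of the translator's `translate` result.
    return nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(project=p['project'], source=p['source']),
        wrapper_code='{result}: result.inputs.' + attr_path,
    )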
@pytest.mark.parametrize("p", projects)
def test_packageName(p):
defaultPackage = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.defaultPackage
''',
)
assert isinstance(defaultPackage, str)
assert len(defaultPackage) > 0
@pytest.mark.parametrize("p", projects)
def test_exportedPackages(p):
exportedPackages = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.exportedPackages
''',
)
assert isinstance(exportedPackages, dict)
assert len(exportedPackages) > 0
@pytest.mark.parametrize("p", projects)
def test_extraObjects(p):
extraObjects = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.extraObjects
''',
)
assert isinstance(extraObjects, list)
for extra_obj in extraObjects:
assert set(extra_obj.keys()) == \
{'name', 'version', 'dependencies', 'sourceSpec'}
assert isinstance(extra_obj['name'], str)
assert len(extra_obj['name']) > 0
assert isinstance(extra_obj['version'], str)
assert len(extra_obj['version']) > 0
check_format_dependencies(extra_obj['dependencies'])
check_format_sourceSpec(extra_obj['sourceSpec'])
@pytest.mark.parametrize("p", projects)
def test_location(p):
location = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.location
''',
)
assert isinstance(location, str)
@pytest.mark.parametrize("p", projects)
def test_serializedRawObjects(p):
serializedRawObjects = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.serializedRawObjects
''',
)
assert isinstance(serializedRawObjects, list)
for raw_obj in serializedRawObjects:
assert isinstance(raw_obj, dict)
@pytest.mark.parametrize("p", projects)
def test_subsystemName(p):
subsystemName = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.subsystemName
''',
)
assert isinstance(subsystemName, str)
assert len(subsystemName) > 0
@pytest.mark.parametrize("p", projects)
def test_subsystemAttrs(p):
subsystemAttrs = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.subsystemAttrs
''',
)
assert isinstance(subsystemAttrs, dict)
@pytest.mark.parametrize("p", projects)
def test_translatorName(p):
translatorName = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.translatorName
''',
)
assert isinstance(translatorName, str)
assert len(translatorName) > 0
@pytest.mark.parametrize("p", projects)
def test_extractors(p):
finalObjects = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
let
l = builtins;
inputs = result.inputs;
rawObjects = inputs.serializedRawObjects;
finalObjects =
l.map
(rawObj: let
finalObj =
l.mapAttrs
(key: extractFunc: extractFunc rawObj finalObj)
inputs.extractors;
in
finalObj)
rawObjects;
in
finalObjects ++ (inputs.extraObjects or [])
''',
)
assert isinstance(finalObjects, list)
assert len(finalObjects) > 0
for finalObj in finalObjects:
assert set(finalObj.keys()) == \
{'name', 'version', 'sourceSpec', 'dependencies'}
check_format_dependencies(finalObj['dependencies'])
check_format_sourceSpec(finalObj['sourceSpec'])
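# Note on the wrapper above: finalObj is defined in terms of itself, which works
# because nix evaluates lazily -- each extractor receives the (not yet evaluated)
# final object, so extractors can read each other's results. A minimal sketch of
# the same pattern with hypothetical extractors:
#   let finalObj = l.mapAttrs (key: f: f rawObj finalObj)
#         {name = raw: fin: raw.pname; version = raw: fin: raw.ver;};
#   in finalObj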
@pytest.mark.parametrize("p", projects)
def test_keys(p):
objectsByKey = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
let
l = builtins;
inputs = result.inputs;
rawObjects = inputs.serializedRawObjects;
finalObjects =
l.map
(rawObj: let
finalObj =
{inherit rawObj;}
// l.mapAttrs
(key: extractFunc: extractFunc rawObj finalObj)
inputs.extractors;
in
finalObj)
rawObjects;
objectsByKey =
l.mapAttrs
(key: keyFunc:
l.foldl'
(merged: finalObj:
merged
// {"${keyFunc finalObj.rawObj finalObj}" = finalObj;})
{}
(finalObjects))
inputs.keys;
in
objectsByKey
''',
)
assert isinstance(objectsByKey, dict)
for key_name, objects in objectsByKey.items():
for finalObj in objects.values():
assert set(finalObj.keys()) == \
{'name', 'version', 'sourceSpec', 'dependencies', 'rawObj'}
check_format_dependencies(finalObj['dependencies'])
check_format_sourceSpec(finalObj['sourceSpec'])
|
"""GraphQL base controller"""
from abc import ABCMeta, abstractmethod
import asyncio
from cgi import parse_multipart
from datetime import datetime
from functools import partial
import io
import logging
from typing import (
Any,
AsyncIterable,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
cast
)
from urllib.parse import parse_qs, urlencode
from bareasgi import (
Application,
HttpRequest,
HttpResponse,
WebSocketRequest,
HttpMiddlewareCallback
)
from bareutils import text_reader, text_writer, response_code, header
import graphql
from graphql import (
ExecutionResult,
GraphQLError,
MapAsyncIterator,
MiddlewareManager
)
from .template import make_template
from .utils import (
cancellable_aiter,
get_host,
get_scheme,
has_subscription,
wrap_middleware,
ZeroEvent
)
LOGGER = logging.getLogger(__name__)
def _encode_sse(
dumps: Callable[[Any], str],
execution_result: Optional[ExecutionResult]
) -> bytes:
if execution_result is None:
payload = f'event: ping\ndata: {datetime.utcnow()}\n\n'
else:
response = {
'data': execution_result.data,
'errors': [
error.formatted
for error in execution_result.errors
] if execution_result.errors else None
}
payload = f'event: message\ndata: {dumps(response)}\n\n'
return payload.encode('utf-8')
def _encode_json(
dumps: Callable[[Any], str],
execution_result: Optional[ExecutionResult]
) -> bytes:
if execution_result is None:
return b'\n'
payload = dumps({
'data': execution_result.data,
'errors': [
error.formatted
for error in execution_result.errors
] if execution_result.errors else None
}) + '\n'
return payload.encode('utf-8')
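# For reference, the two encoders above produce different wire formats (payloads
# are illustrative):
#   SSE:  b'event: message\ndata: {"data": {...}, "errors": null}\n\n'
#   JSON: b'{"data": {...}, "errors": null}\n'  (newline-delimited JSON)
# A ping (execution_result is None) becomes an SSE ping event or a bare b'\n'.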
class GraphQLControllerBase(metaclass=ABCMeta):
"""GraphQL Controller Base"""
def __init__(
self,
path_prefix: str,
middleware: Optional[Union[Tuple, List, MiddlewareManager]],
ping_interval: float,
loads: Callable[[str], Any],
dumps: Callable[[Any], str]
) -> None:
self.path_prefix = path_prefix
self.middleware = middleware
self.ping_interval = ping_interval
self.loads = loads
self.dumps = dumps
self.cancellation_event = asyncio.Event()
self.subscription_count = ZeroEvent()
def add_routes(
self,
app: Application,
path_prefix: str = '',
rest_middleware: Optional[HttpMiddlewareCallback] = None,
view_middleware: Optional[HttpMiddlewareCallback] = None
) -> Application:
"""Add the routes
Args:
app (Application): The ASGI application.
path_prefix (str, optional): The path prefix. Defaults to ''.
rest_middleware (Optional[HttpMiddlewareCallback], optional): The
rest middleware. Defaults to None.
view_middleware (Optional[HttpMiddlewareCallback], optional): The
view middleware. Defaults to None.
Returns:
Application: The application.
"""
# Add the REST routes.
app.http_router.add(
{'GET'},
path_prefix + '/graphql',
wrap_middleware(rest_middleware, self.handle_graphql)
)
app.http_router.add(
{'POST', 'OPTIONS'},
path_prefix + '/graphql',
wrap_middleware(rest_middleware, self.handle_graphql)
)
app.http_router.add(
{'GET'},
path_prefix + '/subscriptions',
wrap_middleware(rest_middleware, self.handle_subscription_get)
)
app.http_router.add(
{'POST', 'OPTIONS'},
path_prefix + '/subscriptions',
wrap_middleware(rest_middleware, self.handle_subscription_post)
)
# Add the subscription route
app.ws_router.add(
path_prefix + '/subscriptions',
self.handle_websocket_subscription
)
# Add Graphiql
app.http_router.add(
{'GET'},
path_prefix + '/graphiql',
wrap_middleware(view_middleware, self.view_graphiql)
)
return app
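    # A minimal usage sketch (the subclass name is hypothetical; a concrete
    # subclass must implement the abstract `query` and `subscribe` methods below):
    #   controller = MyGraphQLController('/api', None, 10.0, json.loads, json.dumps)
    #   controller.add_routes(app, path_prefix='/api')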
async def shutdown(self) -> None:
"""Shutdown the service"""
self.cancellation_event.set()
await self.subscription_count.wait()
async def view_graphiql(self, request: HttpRequest) -> HttpResponse:
"""Render the Graphiql view
Args:
request (HttpRequest): The request.
Returns:
HttpResponse: The response.
"""
try:
host = get_host(request)
scheme = get_scheme(request)
query_path = f'{scheme}://{host}{self.path_prefix}/graphql'
ws_scheme = 'ws' if scheme == 'http' else 'wss'
subscription_path = f'{ws_scheme}://{host}{self.path_prefix}/subscriptions'
body = make_template(
host,
query_path,
subscription_path
)
headers = [
(b'content-type', b'text/html'),
(b'content-length', str(len(body)).encode())
]
return HttpResponse(response_code.OK, headers, text_writer(body))
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle grahphiql request")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
@abstractmethod
async def handle_websocket_subscription(self, request: WebSocketRequest) -> None:
"""Handle a websocket subscription
Args:
request (WebSocketRequest): The request
"""
async def handle_graphql(self, request: HttpRequest) -> HttpResponse:
"""A request handler for graphql queries
Args:
            request (HttpRequest): The request.
Returns:
HttpResponse: The HTTP response to the query request
"""
try:
body = await self._get_query_document(request)
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
query_document = graphql.parse(query)
if not has_subscription(query_document):
return await self._handle_query_or_mutation(
request,
query,
variables,
operation_name
)
# The subscription method is determined by the `allow` header.
allow = header.find(b'allow', request.scope['headers'], b'GET')
if allow == b'GET':
return self._handle_subscription_redirect(request, body)
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle graphql query request")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def handle_subscription_get(self, request: HttpRequest) -> HttpResponse:
"""Handle a streaming subscription
Args:
request (HttpRequest): The request
Returns:
HttpResponse: The streaming response
"""
try:
LOGGER.debug(
"Received GET streaming subscription request: http_version='%s'.",
request.scope['http_version']
)
body = {
name.decode('utf-8'): self.loads(value[0].decode('utf-8'))
for name, value in cast(
Dict[bytes, List[bytes]],
parse_qs(request.scope['query_string'])
).items()
}
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle graphql GET subscription")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def handle_subscription_post(self, request: HttpRequest) -> HttpResponse:
"""Handle a streaming subscription
Args:
request (HttpRequest): The request
Returns:
HttpResponse: A stream response
"""
try:
LOGGER.debug(
"Received POST streaming subscription request: http_version='%s'.",
request.scope['http_version']
)
text = await text_reader(request.body)
body = self.loads(text)
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle graphql POST subscription")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def _get_query_document(self, request: HttpRequest) -> Mapping[str, Any]:
content_type = header.content_type(request.scope['headers'])
if content_type is None:
raise ValueError('Content type not specified')
media_type, parameters = content_type
if media_type == b'application/graphql':
return {'query': await text_reader(request.body)}
elif media_type in (b'application/json', b'text/plain'):
return self.loads(await text_reader(request.body))
elif media_type == b'application/x-www-form-urlencoded':
body = parse_qs(await text_reader(request.body))
return {name: value[0] for name, value in body.items()}
elif media_type == b'multipart/form-data':
if parameters is None:
raise ValueError(
'Missing content type parameters for multipart/form-data'
)
param_dict = {
key.decode('utf-8'): val
for key, val in parameters.items()
}
multipart_dict = parse_multipart(
io.StringIO(await text_reader(request.body)),
param_dict
)
return {
name: value[0]
for name, value in multipart_dict.items()
}
else:
raise RuntimeError(
f"Unsupported content type: {media_type.decode("ascii")}"
)
async def _handle_query_or_mutation(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str]
) -> HttpResponse:
LOGGER.debug("Processing a query or mutation.")
result = await self.query(request, query, variables, operation_name)
response: Dict[str, Any] = {'data': result.data}
if result.errors:
response['errors'] = [
error.formatted for error in result.errors]
text = self.dumps(response)
headers = [
(b'content-type', b'application/json'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(response_code.OK, headers, text_writer(text))
def _handle_subscription_redirect(
self,
request: HttpRequest,
body: Mapping[str, Any]
) -> HttpResponse:
# Handle a subscription by returning 201 (Created) with
# the url location of the subscription.
LOGGER.debug("Redirecting subscription request.")
scheme = request.scope['scheme']
host = cast(
bytes,
header.find( # type: ignore
b'host',
request.scope['headers'],
b'localhost'
)
).decode()
path = self.path_prefix + '/subscriptions'
query_string = urlencode(
{
name.encode('utf-8'): self.dumps(value).encode('utf-8')
for name, value in body.items()
}
)
location = f'{scheme}://{host}{path}?{query_string}'.encode('ascii')
headers = [
(b'access-control-expose-headers', b'location'),
(b'location', location)
]
return HttpResponse(response_code.CREATED, headers)
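    # Illustrative redirect produced above (host and query string are hypothetical):
    #   201 Created
    #   location: http://localhost{path_prefix}/subscriptions?query=...
    # The client then re-issues the subscription against the streaming endpoint.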
async def _handle_streaming_subscription(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str]
) -> HttpResponse:
        # If unspecified, default to server-sent events as they have better support.
accept = cast(
bytes,
header.find(
b'accept', request.scope['headers'], b'text/event-stream')
)
content_type = (
b'application/stream+json'
if accept == b'application/json'
else accept
)
result = await self.subscribe(request, query, variables, operation_name)
is_sse = content_type == b'text/event-stream'
encode = partial(_encode_sse if is_sse else _encode_json, self.dumps)
nudge = b':\n\n' if is_sse else b'\n'
# Make an async iterator for the subscription results.
async def send_events(zero_event: ZeroEvent) -> AsyncIterable[bytes]:
LOGGER.debug('Streaming subscription started.')
try:
zero_event.increment()
async for val in cancellable_aiter(
result,
self.cancellation_event,
timeout=self.ping_interval
):
yield encode(val)
yield nudge # Give the ASGI server a nudge.
except asyncio.CancelledError:
LOGGER.debug("Streaming subscription cancelled.")
except Exception as error: # pylint: disable=broad-except
LOGGER.exception("Streaming subscription failed.")
# If the error is not caught the client fetch will fail, however
# the status code and headers have already been sent. So rather
# than let the fetch fail we send a GraphQL response with no
# data and the error and close gracefully.
if not isinstance(error, GraphQLError):
error = GraphQLError(
'Execution error',
original_error=error
)
val = ExecutionResult(None, [error])
yield encode(val)
yield nudge # Give the ASGI server a nudge.
finally:
zero_event.decrement()
LOGGER.debug("Streaming subscription stopped.")
headers = [
(b'cache-control', b'no-cache'),
(b'content-type', content_type),
(b'connection', b'keep-alive')
]
return HttpResponse(
response_code.OK,
headers,
send_events(self.subscription_count)
)
@abstractmethod
async def subscribe(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str],
) -> MapAsyncIterator:
"""Execute a subscription.
Args:
request (HttpRequest): The http request.
query (str): The subscription query.
variables (Optional[Dict[str, Any]]): Optional variables.
operation_name (Optional[str]): An optional operation name.
Returns:
MapAsyncIterator: An asynchronous iterator of the results.
"""
@abstractmethod
async def query(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str],
) -> ExecutionResult:
"""Execute a query
Args:
request (HttpRequest): The http request.
            query (str): The query.
variables (Optional[Dict[str, Any]]): Optional variables.
operation_name (Optional[str]): An optional operation name.
Returns:
ExecutionResult: The query results.
"""
| """GraphQL base controller"""
from abc import ABCMeta, abstractmethod
import asyncio
from cgi import parse_multipart
from datetime import datetime
from functools import partial
import io
import logging
from typing import (
Any,
AsyncIterable,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
cast
)
from urllib.parse import parse_qs, urlencode
from bareasgi import (
Application,
HttpRequest,
HttpResponse,
WebSocketRequest,
HttpMiddlewareCallback
)
from bareutils import text_reader, text_writer, response_code, header
import graphql
from graphql import (
ExecutionResult,
GraphQLError,
MapAsyncIterator,
MiddlewareManager
)
from .template import make_template
from .utils import (
cancellable_aiter,
get_host,
get_scheme,
has_subscription,
wrap_middleware,
ZeroEvent
)
LOGGER = logging.getLogger(__name__)
def _encode_sse(
dumps: Callable[[Any], str],
execution_result: Optional[ExecutionResult]
) -> bytes:
if execution_result is None:
payload = f'event: ping\ndata: {datetime.utcnow()}\n\n'
else:
response = {
'data': execution_result.data,
'errors': [
error.formatted
for error in execution_result.errors
] if execution_result.errors else None
}
payload = f'event: message\ndata: {dumps(response)}\n\n'
return payload.encode('utf-8')
def _encode_json(
dumps: Callable[[Any], str],
execution_result: Optional[ExecutionResult]
) -> bytes:
if execution_result is None:
return b'\n'
payload = dumps({
'data': execution_result.data,
'errors': [
error.formatted
for error in execution_result.errors
] if execution_result.errors else None
}) + '\n'
return payload.encode('utf-8')
class GraphQLControllerBase(metaclass=ABCMeta):
"""GraphQL Controller Base"""
def __init__(
self,
path_prefix: str,
middleware: Optional[Union[Tuple, List, MiddlewareManager]],
ping_interval: float,
loads: Callable[[str], Any],
dumps: Callable[[Any], str]
) -> None:
self.path_prefix = path_prefix
self.middleware = middleware
self.ping_interval = ping_interval
self.loads = loads
self.dumps = dumps
self.cancellation_event = asyncio.Event()
self.subscription_count = ZeroEvent()
def add_routes(
self,
app: Application,
path_prefix: str = '',
rest_middleware: Optional[HttpMiddlewareCallback] = None,
view_middleware: Optional[HttpMiddlewareCallback] = None
) -> Application:
"""Add the routes
Args:
app (Application): The ASGI application.
path_prefix (str, optional): The path prefix. Defaults to ''.
rest_middleware (Optional[HttpMiddlewareCallback], optional): The
rest middleware. Defaults to None.
view_middleware (Optional[HttpMiddlewareCallback], optional): The
view middleware. Defaults to None.
Returns:
Application: The application.
"""
# Add the REST routes.
app.http_router.add(
{'GET'},
path_prefix + '/graphql',
wrap_middleware(rest_middleware, self.handle_graphql)
)
app.http_router.add(
{'POST', 'OPTIONS'},
path_prefix + '/graphql',
wrap_middleware(rest_middleware, self.handle_graphql)
)
app.http_router.add(
{'GET'},
path_prefix + '/subscriptions',
wrap_middleware(rest_middleware, self.handle_subscription_get)
)
app.http_router.add(
{'POST', 'OPTIONS'},
path_prefix + '/subscriptions',
wrap_middleware(rest_middleware, self.handle_subscription_post)
)
# Add the subscription route
app.ws_router.add(
path_prefix + '/subscriptions',
self.handle_websocket_subscription
)
# Add Graphiql
app.http_router.add(
{'GET'},
path_prefix + '/graphiql',
wrap_middleware(view_middleware, self.view_graphiql)
)
return app
async def shutdown(self) -> None:
"""Shutdown the service"""
self.cancellation_event.set()
await self.subscription_count.wait()
async def view_graphiql(self, request: HttpRequest) -> HttpResponse:
"""Render the Graphiql view
Args:
request (HttpRequest): The request.
Returns:
HttpResponse: The response.
"""
try:
host = get_host(request)
scheme = get_scheme(request)
query_path = f'{scheme}://{host}{self.path_prefix}/graphql'
ws_scheme = 'ws' if scheme == 'http' else 'wss'
subscription_path = f'{ws_scheme}://{host}{self.path_prefix}/subscriptions'
body = make_template(
host,
query_path,
subscription_path
)
headers = [
(b'content-type', b'text/html'),
(b'content-length', str(len(body)).encode())
]
return HttpResponse(response_code.OK, headers, text_writer(body))
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle grahphiql request")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
@abstractmethod
async def handle_websocket_subscription(self, request: WebSocketRequest) -> None:
"""Handle a websocket subscription
Args:
request (WebSocketRequest): The request
"""
async def handle_graphql(self, request: HttpRequest) -> HttpResponse:
"""A request handler for graphql queries
Args:
            request (HttpRequest): The request.
Returns:
HttpResponse: The HTTP response to the query request
"""
try:
body = await self._get_query_document(request)
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
query_document = graphql.parse(query)
if not has_subscription(query_document):
return await self._handle_query_or_mutation(
request,
query,
variables,
operation_name
)
# The subscription method is determined by the `allow` header.
allow = header.find(b'allow', request.scope['headers'], b'GET')
if allow == b'GET':
return self._handle_subscription_redirect(request, body)
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle graphql query request")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def handle_subscription_get(self, request: HttpRequest) -> HttpResponse:
"""Handle a streaming subscription
Args:
request (HttpRequest): The request
Returns:
HttpResponse: The streaming response
"""
try:
LOGGER.debug(
"Received GET streaming subscription request: http_version='%s'.",
request.scope['http_version']
)
body = {
name.decode('utf-8'): self.loads(value[0].decode('utf-8'))
for name, value in cast(
Dict[bytes, List[bytes]],
parse_qs(request.scope['query_string'])
).items()
}
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle graphql GET subscription")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def handle_subscription_post(self, request: HttpRequest) -> HttpResponse:
"""Handle a streaming subscription
Args:
request (HttpRequest): The request
Returns:
HttpResponse: A stream response
"""
try:
LOGGER.debug(
"Received POST streaming subscription request: http_version='%s'.",
request.scope['http_version']
)
text = await text_reader(request.body)
body = self.loads(text)
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
# pylint: disable=bare-except
except:
LOGGER.exception("Failed to handle graphql POST subscription")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def _get_query_document(self, request: HttpRequest) -> Mapping[str, Any]:
content_type = header.content_type(request.scope['headers'])
if content_type is None:
raise ValueError('Content type not specified')
media_type, parameters = content_type
if media_type == b'application/graphql':
return {'query': await text_reader(request.body)}
elif media_type in (b'application/json', b'text/plain'):
return self.loads(await text_reader(request.body))
elif media_type == b'application/x-www-form-urlencoded':
body = parse_qs(await text_reader(request.body))
return {name: value[0] for name, value in body.items()}
elif media_type == b'multipart/form-data':
if parameters is None:
raise ValueError(
'Missing content type parameters for multipart/form-data'
)
param_dict = {
key.decode('utf-8'): val
for key, val in parameters.items()
}
multipart_dict = parse_multipart(
io.StringIO(await text_reader(request.body)),
param_dict
)
return {
name: value[0]
for name, value in multipart_dict.items()
}
else:
raise RuntimeError(
f"Unsupported content type: {media_type.decode('ascii')}"
)
async def _handle_query_or_mutation(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str]
) -> HttpResponse:
LOGGER.debug("Processing a query or mutation.")
result = await self.query(request, query, variables, operation_name)
response: Dict[str, Any] = {'data': result.data}
if result.errors:
response['errors'] = [
error.formatted for error in result.errors]
text = self.dumps(response)
headers = [
(b'content-type', b'application/json'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(response_code.OK, headers, text_writer(text))
def _handle_subscription_redirect(
self,
request: HttpRequest,
body: Mapping[str, Any]
) -> HttpResponse:
# Handle a subscription by returning 201 (Created) with
# the url location of the subscription.
LOGGER.debug("Redirecting subscription request.")
scheme = request.scope['scheme']
host = cast(
bytes,
header.find( # type: ignore
b'host',
request.scope['headers'],
b'localhost'
)
).decode()
path = self.path_prefix + '/subscriptions'
query_string = urlencode(
{
name.encode('utf-8'): self.dumps(value).encode('utf-8')
for name, value in body.items()
}
)
location = f'{scheme}://{host}{path}?{query_string}'.encode('ascii')
headers = [
(b'access-control-expose-headers', b'location'),
(b'location', location)
]
return HttpResponse(response_code.CREATED, headers)
async def _handle_streaming_subscription(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str]
) -> HttpResponse:
        # If unspecified, default to server-sent events as they have better support.
accept = cast(
bytes,
header.find(
b'accept', request.scope['headers'], b'text/event-stream')
)
content_type = (
b'application/stream+json'
if accept == b'application/json'
else accept
)
result = await self.subscribe(request, query, variables, operation_name)
is_sse = content_type == b'text/event-stream'
encode = partial(_encode_sse if is_sse else _encode_json, self.dumps)
nudge = b':\n\n' if is_sse else b'\n'
# Make an async iterator for the subscription results.
async def send_events(zero_event: ZeroEvent) -> AsyncIterable[bytes]:
LOGGER.debug('Streaming subscription started.')
try:
zero_event.increment()
async for val in cancellable_aiter(
result,
self.cancellation_event,
timeout=self.ping_interval
):
yield encode(val)
yield nudge # Give the ASGI server a nudge.
except asyncio.CancelledError:
LOGGER.debug("Streaming subscription cancelled.")
except Exception as error: # pylint: disable=broad-except
LOGGER.exception("Streaming subscription failed.")
# If the error is not caught the client fetch will fail, however
# the status code and headers have already been sent. So rather
# than let the fetch fail we send a GraphQL response with no
# data and the error and close gracefully.
if not isinstance(error, GraphQLError):
error = GraphQLError(
'Execution error',
original_error=error
)
val = ExecutionResult(None, [error])
yield encode(val)
yield nudge # Give the ASGI server a nudge.
finally:
zero_event.decrement()
LOGGER.debug("Streaming subscription stopped.")
headers = [
(b'cache-control', b'no-cache'),
(b'content-type', content_type),
(b'connection', b'keep-alive')
]
return HttpResponse(
response_code.OK,
headers,
send_events(self.subscription_count)
)
@abstractmethod
async def subscribe(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str],
) -> MapAsyncIterator:
"""Execute a subscription.
Args:
request (HttpRequest): The http request.
query (str): The subscription query.
variables (Optional[Dict[str, Any]]): Optional variables.
operation_name (Optional[str]): An optional operation name.
Returns:
MapAsyncIterator: An asynchronous iterator of the results.
"""
@abstractmethod
async def query(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str],
) -> ExecutionResult:
"""Execute a query
Args:
request (HttpRequest): The http request.
            query (str): The query.
variables (Optional[Dict[str, Any]]): Optional variables.
operation_name (Optional[str]): An optional operation name.
Returns:
ExecutionResult: The query results.
"""
|
import re
import os
import json
import warnings
from sys import argv
from getopt import getopt
from typing import Iterator, Union
from subprocess import DEVNULL, Popen
CONFIG = {}
CONFIG_PATH = "config.json"
FORMAT_VIDEO_NAME = "{i}、{title}-{name}"
class BiLiVideoConvert:
def __init__(self, input_dir: str = None, output_dir: str = None):
"""
input_dir 相当于 Android/data/tv.danmaku.bili/download 目录,即该文件夹下存在多个下载的视频项目
:param input_dir: 下载视频路径
:param output_dir: 转换后视频存放路径
"""
# 参数为空时读取配置文件,配置文件中不存在则使用默认配置
if input_dir is None:
input_dir = CONFIG.get("input_dir", "download")
if output_dir is None:
output_dir = CONFIG.get("output_dir", "output")
self.input_dir = input_dir
self.output_dir = output_dir
self.movie_dirs = os.listdir(input_dir)
self.movies = {}
def parse_movies(self):
        for movie_info in self.get_movie_infos():
            avid = movie_info.get("avid")
            if avid:
                avid = f"AV{avid}"
            bvid = movie_info["bvid"]
            season_id = movie_info["season_id"]
            if season_id:
                season_id = f"S_{season_id}"
            vid = avid or bvid or season_id
            # Add a default record if this vid has not been seen yet
            if vid not in self.movies:
                self.movies[vid] = {
                    "avid": avid,
                    "bvid": bvid,
                    "season_id": season_id,
                    "title": movie_info['title'],  # title
                    "total": 0,  # total number of parts
                    "download_total": 0,  # number of finished downloads
                    "page_data": []  # per-part page data
                }
            # Check whether the part finished downloading and record its page data
            is_completed = movie_info['is_completed']  # download finished?
            self.movies[vid]["total"] += 1
            page_data = {
                "page": movie_info["page"],
                "part": movie_info["part"],
                "is_completed": is_completed
            }
            if is_completed:
                self.movies[vid]["download_total"] += 1
                page_data["video_path"] = movie_info["video_path"]
                page_data["audio_path"] = movie_info["audio_path"]
            self.movies[vid]["page_data"].append(page_data)
    def get_movie_infos(self) -> Iterator[dict]:
        """
        Collect the info of the video projects under input_dir
        :return:
        """
        for movie_dir in self.movie_dirs:
            # Build the absolute path of the video project
            movie_ads_dir = os.path.join(self.input_dir, movie_dir)
            # Walk the directories under the video project
            for folder_name, sub_folders, file_names in os.walk(movie_ads_dir):
                entry_file = os.path.join(folder_name, "entry.json")
                # A directory counts as a video directory if it contains entry.json
                if os.path.exists(entry_file):
                    # Parse the entry file
                    entry = parse_entry(entry_file)
                    if entry:
                        yield entry
                        # if movie_dir == str(entry['vid'])
def convert(self, vid: Union[int, str]):
        # Look up the video project
        if vid in self.movies:
            movie_info = self.movies.get(vid)
            print(movie_info)
        else:
            print("Invalid video ID")
            return
        # Build the output directory for this project
        project_output_dir = filename_filter(os.path.join(self.output_dir, movie_info["title"]))
        # Create the directory if it does not exist yet
        if not os.path.exists(project_output_dir):
            os.makedirs(project_output_dir)
        # Convert the videos
        for page_data in movie_info["page_data"]:
            # Only convert parts whose download finished
            if page_data["is_completed"]:
                # Build the formatted file name
                page_name = format_video_name(**movie_info, **page_data)
                composite_video(
                    os.path.abspath(page_data["video_path"]),
                    os.path.abspath(page_data["audio_path"]),
                    os.path.abspath(os.path.join(project_output_dir, filename_filter(page_name)))
                )
            else:
                print(f"{movie_info.get("title")}-{page_data.get("part")} has not finished downloading!")
def show_info(self):
"""
展示视频信息
:return:
"""
movies_list = []
for index, [vid, movie] in enumerate(self.movies.items()):
movies_list.append(vid)
print(f"{index + 1}、({vid: <12})[{movie["download_total"]:-3}/{movie["total"]:-3}] {movie["title"]}")
index: str = input("请输入要转换的编号(all 全部, exit 退出): ")
if index == "all":
for vid in movies_list:
self.convert(vid)
elif index in ["exit"]:
print("用户退出")
exit(0)
else:
self.convert(movies_list[int(index) - 1])
def run(self):
"""
主程序
:return:
"""
print("开始解析视频信息...")
self.parse_movies()
print("解析视频信息完成")
self.show_info()
pass
def format_video_name(**video_info: dict) -> str:
"""
根据 FORMAT_VIDEO_NAME 格式化转换的视频文件名
{title} 视频标题
{name} {part} 视频名称
{i} {page} {index} 视频索引,从1开始
:param video_info: 视频信息
:return: 格式化后的文件名
"""
title = video_info.get("title", "")
part = video_info.get("part", "")
page = str(video_info.get("page", ""))
# TODO 判断视频名称是否包序号 part.startswith(page), 存在则不添加序号
result = FORMAT_VIDEO_NAME + ".mp4"
# 视频索引
result = result.replace("{i}", page)
result = result.replace("{index}", page)
result = result.replace("{page}", page)
# 视频名称
result = result.replace("{name}", part)
result = result.replace("{part}", part)
# 视频标题
result = result.replace("{title}", title)
return result
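# Illustrative expansion with the default FORMAT_VIDEO_NAME (values hypothetical):
#   format_video_name(title="SomeShow", part="Episode 1", page=1)
#   -> "1、SomeShow-Episode 1.mp4"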
def composite_video(video_path: str, audio_path: str, out_path: str):
"""
合成mp4文件
:param video_path: 视频路径
:param audio_path: 音频路径
:param out_path: 输出路径
:return:
"""
# 生成合成命令
cmd = f'ffmpeg -y -i "{video_path}" -i "{audio_path}" -codec copy "{out_path}"'
print('*' * 50)
print("视频源:" + video_path)
print("音频源:" + audio_path)
print("输出源:" + out_path)
Popen(cmd, stderr=DEVNULL).wait()
def filename_filter(filename: str, repl: str = '') -> str:
"""
将文件名替换成合法的文件名
:param filename: 原文件名
:param repl: 替换字符
:return: 合法文件名
"""
return re.sub('[/:*?"<>|]', repl, filename)
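# Illustrative: filename_filter('a/b:c*d?"e"') -> 'abcde'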
def parse_entry(entry_file):
"""
解析视频配置(入口)文件
:param entry_file: 文件路径
:return: 视频信息
"""
# 打开文件
try:
with open(entry_file, 'r', encoding='utf-8') as fp:
entry: dict = json.load(fp)
# 解析媒体类型
media_type: int = entry.get('media_type') # 媒体类型,1的可能是blv格式
if media_type not in [2]:
# 不支持的媒体类型
warnings.warn(f"Warning Unsupported media type:{media_type} in {entry_file}")
return
# 解析视频 ID
avid: int = entry.get('avid') # avid
bvid: str = entry.get('bvid') # bvid
season_id: int = entry.get('season_id') # season_id, 番剧id
# 视频信息
title: str = entry.get("title") # 视频标题
is_completed: bool = entry.get("is_completed", False) # 是否下载完成
# 获取当前视频分集的信息数据
if avid or bvid:
page = entry["page_data"]["page"] # 视频索引
part = entry["page_data"]["part"] # 视频标题
if season_id:
page = entry["ep"]["page"]
part = entry["ep"]["index_title"]
item = {
"avid": avid,
"bvid": bvid,
"season_id": season_id,
"title": title,
"is_completed": is_completed,
"page": page,
"part": part
}
# 判断视频下载完成, 获取视频文件及音频文件信息
if is_completed:
# 视频、音频下载目录
type_tag = entry.get('type_tag')
# 视频路径
video_path = os.path.join(os.path.dirname(entry_file), type_tag, "video.m4s")
if os.path.exists(video_path): # 判断文件是否存在
item["video_path"] = video_path
# 音频路径
audio_path = os.path.join(os.path.dirname(entry_file), type_tag, "audio.m4s")
if os.path.exists(audio_path): # 判断文件是否存在
item["audio_path"] = audio_path
return item
except json.decoder.JSONDecodeError as e:
# 文件无法解析
warnings.warn(f"Warning file could not parse: {entry_file} \n{e.msg}")
def get_command_args() -> tuple:
"""
获取命令行输入的参数
:return:
"""
i = o = None
opts, args = getopt(argv[1:], "i:o:")
for opt, arg in opts:
if opt in ["i"]:
i = arg
if opt in ["o"]:
o = arg
return i, o
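# Illustrative invocation (the script name is hypothetical):
#   python bili_convert.py -i download -o output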
def load_config():
"""
从文件读取配置
:return:
"""
try:
global CONFIG
with open(CONFIG_PATH, "r") as fp:
CONFIG = json.load(fp)
except FileNotFoundError:
print("create default config.")
CONFIG = {
"input_dir": "download",
"output_dir": "output"
}
refresh_config()
except json.decoder.JSONDecodeError:
print("读取配置文件错误,请检查配置文件,若无法使用可尝试删除配置文件。")
def refresh_config():
"""
保存配置到文件
:return:
"""
with open(CONFIG_PATH, 'w', encoding="utf-8") as fp:
json.dump(CONFIG, fp, ensure_ascii=False)
def main():
load_config()
video_convert = BiLiVideoConvert(*get_command_args())
video_convert.run()
if __name__ == '__main__':
main()
| import re
import os
import json
import warnings
from sys import argv
from getopt import getopt
from typing import Iterator, Union
from subprocess import DEVNULL, Popen
CONFIG = {}
CONFIG_PATH = "config.json"
FORMAT_VIDEO_NAME = "{i}、{title}-{name}"
class BiLiVideoConvert:
def __init__(self, input_dir: str = None, output_dir: str = None):
"""
input_dir 相当于 Android/data/tv.danmaku.bili/download 目录,即该文件夹下存在多个下载的视频项目
:param input_dir: 下载视频路径
:param output_dir: 转换后视频存放路径
"""
# 参数为空时读取配置文件,配置文件中不存在则使用默认配置
if input_dir is None:
input_dir = CONFIG.get("input_dir", "download")
if output_dir is None:
output_dir = CONFIG.get("output_dir", "output")
self.input_dir = input_dir
self.output_dir = output_dir
self.movie_dirs = os.listdir(input_dir)
self.movies = {}
def parse_movies(self):
        for movie_info in self.get_movie_infos():
            avid = movie_info.get("avid")
            if avid:
                avid = f"AV{avid}"
            bvid = movie_info["bvid"]
            season_id = movie_info["season_id"]
            if season_id:
                season_id = f"S_{season_id}"
            vid = avid or bvid or season_id
            # Add a default record if this vid has not been seen yet
            if vid not in self.movies:
                self.movies[vid] = {
                    "avid": avid,
                    "bvid": bvid,
                    "season_id": season_id,
                    "title": movie_info['title'],  # title
                    "total": 0,  # total number of parts
                    "download_total": 0,  # number of finished downloads
                    "page_data": []  # per-part page data
                }
            # Check whether the part finished downloading and record its page data
            is_completed = movie_info['is_completed']  # download finished?
            self.movies[vid]["total"] += 1
            page_data = {
                "page": movie_info["page"],
                "part": movie_info["part"],
                "is_completed": is_completed
            }
            if is_completed:
                self.movies[vid]["download_total"] += 1
                page_data["video_path"] = movie_info["video_path"]
                page_data["audio_path"] = movie_info["audio_path"]
            self.movies[vid]["page_data"].append(page_data)
    def get_movie_infos(self) -> Iterator[dict]:
        """
        Collect the info of the video projects under input_dir
        :return:
        """
        for movie_dir in self.movie_dirs:
            # Build the absolute path of the video project
            movie_ads_dir = os.path.join(self.input_dir, movie_dir)
            # Walk the directories under the video project
            for folder_name, sub_folders, file_names in os.walk(movie_ads_dir):
                entry_file = os.path.join(folder_name, "entry.json")
                # A directory counts as a video directory if it contains entry.json
                if os.path.exists(entry_file):
                    # Parse the entry file
                    entry = parse_entry(entry_file)
                    if entry:
                        yield entry
                        # if movie_dir == str(entry['vid'])
def convert(self, vid: Union[int, str]):
        # Look up the video project
        if vid in self.movies:
            movie_info = self.movies.get(vid)
            print(movie_info)
        else:
            print("Invalid video ID")
            return
        # Build the output directory for this project
        project_output_dir = filename_filter(os.path.join(self.output_dir, movie_info["title"]))
        # Create the directory if it does not exist yet
        if not os.path.exists(project_output_dir):
            os.makedirs(project_output_dir)
        # Convert the videos
        for page_data in movie_info["page_data"]:
            # Only convert parts whose download finished
            if page_data["is_completed"]:
                # Build the formatted file name
                page_name = format_video_name(**movie_info, **page_data)
                composite_video(
                    os.path.abspath(page_data["video_path"]),
                    os.path.abspath(page_data["audio_path"]),
                    os.path.abspath(os.path.join(project_output_dir, filename_filter(page_name)))
                )
            else:
                print(f"{movie_info.get('title')}-{page_data.get('part')} has not finished downloading!")
def show_info(self):
"""
展示视频信息
:return:
"""
movies_list = []
for index, [vid, movie] in enumerate(self.movies.items()):
movies_list.append(vid)
print(f"{index + 1}、({vid: <12})[{movie['download_total']:-3}/{movie['total']:-3}] {movie['title']}")
index: str = input("请输入要转换的编号(all 全部, exit 退出): ")
if index == "all":
for vid in movies_list:
self.convert(vid)
elif index in ["exit"]:
print("用户退出")
exit(0)
else:
self.convert(movies_list[int(index) - 1])
def run(self):
"""
主程序
:return:
"""
print("开始解析视频信息...")
self.parse_movies()
print("解析视频信息完成")
self.show_info()
pass
def format_video_name(**video_info: dict) -> str:
"""
根据 FORMAT_VIDEO_NAME 格式化转换的视频文件名
{title} 视频标题
{name} {part} 视频名称
{i} {page} {index} 视频索引,从1开始
:param video_info: 视频信息
:return: 格式化后的文件名
"""
title = video_info.get("title", "")
part = video_info.get("part", "")
page = str(video_info.get("page", ""))
# TODO 判断视频名称是否包序号 part.startswith(page), 存在则不添加序号
result = FORMAT_VIDEO_NAME + ".mp4"
# 视频索引
result = result.replace("{i}", page)
result = result.replace("{index}", page)
result = result.replace("{page}", page)
# 视频名称
result = result.replace("{name}", part)
result = result.replace("{part}", part)
# 视频标题
result = result.replace("{title}", title)
return result
def composite_video(video_path: str, audio_path: str, out_path: str):
"""
合成mp4文件
:param video_path: 视频路径
:param audio_path: 音频路径
:param out_path: 输出路径
:return:
"""
# 生成合成命令
cmd = f'ffmpeg -y -i "{video_path}" -i "{audio_path}" -codec copy "{out_path}"'
print('*' * 50)
print("视频源:" + video_path)
print("音频源:" + audio_path)
print("输出源:" + out_path)
Popen(cmd, stderr=DEVNULL).wait()
def filename_filter(filename: str, repl: str = '') -> str:
"""
将文件名替换成合法的文件名
:param filename: 原文件名
:param repl: 替换字符
:return: 合法文件名
"""
return re.sub('[/:*?"<>|]', repl, filename)
def parse_entry(entry_file):
"""
解析视频配置(入口)文件
:param entry_file: 文件路径
:return: 视频信息
"""
# 打开文件
try:
with open(entry_file, 'r', encoding='utf-8') as fp:
entry: dict = json.load(fp)
# 解析媒体类型
media_type: int = entry.get('media_type') # 媒体类型,1的可能是blv格式
if media_type not in [2]:
# 不支持的媒体类型
warnings.warn(f"Warning Unsupported media type:{media_type} in {entry_file}")
return
# 解析视频 ID
avid: int = entry.get('avid') # avid
bvid: str = entry.get('bvid') # bvid
season_id: int = entry.get('season_id') # season_id, 番剧id
# 视频信息
title: str = entry.get("title") # 视频标题
is_completed: bool = entry.get("is_completed", False) # 是否下载完成
# 获取当前视频分集的信息数据
if avid or bvid:
page = entry["page_data"]["page"] # 视频索引
part = entry["page_data"]["part"] # 视频标题
if season_id:
page = entry["ep"]["page"]
part = entry["ep"]["index_title"]
item = {
"avid": avid,
"bvid": bvid,
"season_id": season_id,
"title": title,
"is_completed": is_completed,
"page": page,
"part": part
}
# 判断视频下载完成, 获取视频文件及音频文件信息
if is_completed:
# 视频、音频下载目录
type_tag = entry.get('type_tag')
# 视频路径
video_path = os.path.join(os.path.dirname(entry_file), type_tag, "video.m4s")
if os.path.exists(video_path): # 判断文件是否存在
item["video_path"] = video_path
# 音频路径
audio_path = os.path.join(os.path.dirname(entry_file), type_tag, "audio.m4s")
if os.path.exists(audio_path): # 判断文件是否存在
item["audio_path"] = audio_path
return item
except json.decoder.JSONDecodeError as e:
# 文件无法解析
warnings.warn(f"Warning file could not parse: {entry_file} \n{e.msg}")
def get_command_args() -> tuple:
"""
获取命令行输入的参数
:return:
"""
i = o = None
opts, args = getopt(argv[1:], "i:o:")
for opt, arg in opts:
if opt in ["i"]:
i = arg
if opt in ["o"]:
o = arg
return i, o
def load_config():
"""
从文件读取配置
:return:
"""
try:
global CONFIG
with open(CONFIG_PATH, "r") as fp:
CONFIG = json.load(fp)
except FileNotFoundError:
print("create default config.")
CONFIG = {
"input_dir": "download",
"output_dir": "output"
}
refresh_config()
except json.decoder.JSONDecodeError:
print("读取配置文件错误,请检查配置文件,若无法使用可尝试删除配置文件。")
def refresh_config():
"""
保存配置到文件
:return:
"""
with open(CONFIG_PATH, 'w', encoding="utf-8") as fp:
json.dump(CONFIG, fp, ensure_ascii=False)
def main():
load_config()
video_convert = BiLiVideoConvert(*get_command_args())
video_convert.run()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
"""
Kim Brugger (25 Sep 2020), contact: kim@brugger.dk
"""
import socket
import subprocess
import argparse
import shlex
import json
import datetime
states = {'active': 1,
'inactive': 2,
'activating': 3,
'deactivating': 4,
'failed': 5,
'not-found': 6,
'dead': 7,}
def get_host_name() -> str:
return socket.getfqdn()
def get_state(service_name) -> dict:
cmd = f"systemctl show --no-page {service_name}"
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) # NOQA
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
res = {'name': service_name}
if stderr:
        res['status'] = 'status-error'
return res
for line in str(stdout).split("\n"):
if "=" not in line:
continue
key, value = line.split("=",1)
if key == 'ActiveState' and 'status' not in res:
res['status'] = value
if key == 'LoadState' and value == 'not-found':
res['status'] = "not-found"
if key == 'ExecMainStartTimestamp':
ts = datetime.datetime.strptime(value, "%a %Y-%m-%d %H:%M:%S %Z")
now = datetime.datetime.now()
res['uptime'] = (now - ts).total_seconds()
res[ "status_code" ] = states[ res['status']]
return res
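# Illustrative return value for a running unit (values hypothetical):
#   {'name': 'sshd', 'status': 'active', 'uptime': 1234.5, 'status_code': 1}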
def main():
parser = argparse.ArgumentParser(description='systemd service status reporter')
parser.add_argument('-t', '--telegraf', default=False, action="store_true", help="telegraf compatible format")
    parser.add_argument('-j', '--json', default=False, action="store_true", help="json output format")
parser.add_argument('services', nargs='+', help="service(s) to check")
args = parser.parse_args()
statuses = []
for service in args.services:
status = get_state(service)
if args.telegraf:
line = f"service,host={get_host_name()},service={service} status_code={status["status_code"]}"
if 'uptime' in status:
line += f",uptime={status["uptime"]}"
print( line )
elif args.json:
statuses.append( status )
else:
line = f"{service:20s} {status["status_code"]}/{status["status"]:15}"
if 'uptime' in status:
line += f" {status["uptime"]}s"
print( line )
if args.json:
print(json.dumps( statuses))
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
"""
Kim Brugger (25 Sep 2020), contact: kim@brugger.dk
"""
import socket
import subprocess
import argparse
import shlex
import json
import datetime
states = {'active': 1,
'inactive': 2,
'activating': 3,
'deactivating': 4,
'failed': 5,
'not-found': 6,
'dead': 7,}
def get_host_name() -> str:
return socket.getfqdn()
def get_state(service_name) -> dict:
cmd = f"systemctl show --no-page {service_name}"
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) # NOQA
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
res = {'name': service_name}
if stderr:
        res['status'] = 'status-error'
return res
for line in str(stdout).split("\n"):
if "=" not in line:
continue
key, value = line.split("=",1)
if key == 'ActiveState' and 'status' not in res:
res['status'] = value
if key == 'LoadState' and value == 'not-found':
res['status'] = "not-found"
if key == 'ExecMainStartTimestamp':
ts = datetime.datetime.strptime(value, "%a %Y-%m-%d %H:%M:%S %Z")
now = datetime.datetime.now()
res['uptime'] = (now - ts).total_seconds()
res[ "status_code" ] = states[ res['status']]
return res
def main():
parser = argparse.ArgumentParser(description='systemd service status reporter')
parser.add_argument('-t', '--telegraf', default=False, action="store_true", help="telegraf compatible format")
    parser.add_argument('-j', '--json', default=False, action="store_true", help="json output format")
parser.add_argument('services', nargs='+', help="service(s) to check")
args = parser.parse_args()
statuses = []
for service in args.services:
status = get_state(service)
if args.telegraf:
line = f"service,host={get_host_name()},service={service} status_code={status['status_code']}"
if 'uptime' in status:
line += f",uptime={status['uptime']}"
print( line )
elif args.json:
statuses.append( status )
else:
line = f"{service:20s} {status['status_code']}/{status['status']:15}"
if 'uptime' in status:
line += f" {status['uptime']}s"
print( line )
if args.json:
print(json.dumps( statuses))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
def parseBags(lines: list) -> dict:
lines = [l.split(' contain ') for l in lines]
bags = {}
for line in lines:
outer = line[0][:line[0].index('bags') - 1]
inner = {}
if 'no other' not in line[1]:
for each in line[1].split(','):
each = each.strip()
first_space = each.index(' ')
last_space = each.index(' bag')
colour = each[first_space + 1:last_space]
num = int(each[:first_space])
inner[colour] = num
bags[outer] = inner
return bags
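# Illustrative parse: "light red bags contain 1 bright white bag, 2 muted yellow bags."
# becomes {'light red': {'bright white': 1, 'muted yellow': 2}}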
def countShiny(bags: dict) -> int:
return sum([int(containsShiny(bags, colour)) for colour in bags])
def containsShiny(bags: dict, colour: str) -> bool:
for each in bags[colour]:
if each == 'shiny gold' or containsShiny(bags, each):
return True
return False
def countInside(bags: dict, colour: str) -> int:
return sum([v + v * countInside(bags, k) for k, v in bags[colour].items()])
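# Illustrative: if 'shiny gold' holds {'dark red': 2} and 'dark red' holds
# {'dark blue': 3} (with 'dark blue' empty), then
# countInside(bags, 'shiny gold') = 2 + 2 * 3 = 8.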
if __name__ == '__main__':
with open('day7.in') as f:
bags = parseBags(f.readlines())
print(f"Part 1 = {countShiny(bags)}")
print(f"Part 2 = {countInside(bags, "shiny gold")}")
| #!/usr/bin/env python
def parseBags(lines: list) -> dict:
lines = [l.split(' contain ') for l in lines]
bags = {}
for line in lines:
outer = line[0][:line[0].index('bags') - 1]
inner = {}
if 'no other' not in line[1]:
for each in line[1].split(','):
each = each.strip()
first_space = each.index(' ')
last_space = each.index(' bag')
colour = each[first_space + 1:last_space]
num = int(each[:first_space])
inner[colour] = num
bags[outer] = inner
return bags
def countShiny(bags: dict) -> int:
return sum([int(containsShiny(bags, colour)) for colour in bags])
def containsShiny(bags: dict, colour: str) -> bool:
for each in bags[colour]:
if each == 'shiny gold' or containsShiny(bags, each):
return True
return False
def countInside(bags: dict, colour: str) -> int:
return sum([v + v * countInside(bags, k) for k, v in bags[colour].items()])
if __name__ == '__main__':
with open('day7.in') as f:
bags = parseBags(f.readlines())
print(f"Part 1 = {countShiny(bags)}")
print(f"Part 2 = {countInside(bags, 'shiny gold')}")
|
#!/usr/bin/env python3
import itertools
import sys
from time import sleep, time
import numpy as np
import pygame
from pygame.colordict import THECOLORS as colors
def load_image(name):
image = pygame.image.load(name).convert_alpha()
return image
def get_food_sprite():
image = load_image("./renderer/sprites/food.png")
image = pygame.transform.scale(image, (20, 30))
return image
def get_base_sprite():
image = load_image("./renderer/sprites/base.png")
image = pygame.transform.scale(image, (20, 20))
return image
def get_actor_sprite():
image = load_image("./renderer/sprites/actor.png")
image = pygame.transform.scale(image, (20, 20))
return image
class Renderer:
def __init__(self):
pygame.init()
        self.size = int(600 * 1.5), int(600 * 1.5)
self.screen = pygame.display.set_mode(self.size)
self._data = None
self._current_tick = None
self.clock = pygame.time.Clock()
self.font = pygame.font.Font(pygame.font.get_default_font(), 16)
self.food_sprite = get_food_sprite()
self.actor_sprite = get_actor_sprite()
self.base_sprite = get_base_sprite()
# FIXME: This should be read from the replay file
self._scale = np.array(self.size) / np.array([40, 40])
# Update the game state 30 times per second
self.tick_duration = 1.0 / 30.0
self._target_frame_duration = 1.0 / 60.0
self._frame_timer = time()
self._tick_timer = time()
self.color_map = {}
self.agent_colors = [
colors["cadetblue"],
colors["mediumorchid3"],
colors["yellow3"],
colors["darkolivegreen3"],
]
def set_data(self, data):
self._data = data
self._current_tick = 0
first_data = data[0]
bases = first_data["world_state"]["bases"]
agent_ids = [base["owner_id"] for base in bases]
for index, agent_id in enumerate(agent_ids):
self.color_map[agent_id] = self.agent_colors[index]
def _advance_tick(self):
now = time()
if now - self._tick_timer < self.tick_duration:
return
self._tick_timer = now
self._current_tick += 1
if self._current_tick >= len(self._data):
self._current_tick = 0
@property
def data(self):
return self._data[self._current_tick]
def update(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
self.clock.tick()
self._advance_tick()
self.screen.fill(colors["white"])
world_state = self.data["world_state"]
actions = list(
itertools.chain.from_iterable(
[x["actions"] for x in self.data["agent_actions"]]
)
)
foods = world_state["foods"]
actors = world_state["actors"]
bases = world_state["bases"]
# FIXME: Each agent should have its own color
for base in bases:
self._draw_base(base)
for actor in actors:
position = np.array(actor["position"]) * self._scale
self._draw_actor(position, actor["owner_id"])
for action in actions:
if action.get("actor_id") == actor["id"]:
if action["action"] == "move":
pygame.draw.line(
self.screen,
colors["gray"],
np.array(actor["position"]) * self._scale,
np.array(action["target"]) * self._scale,
2,
)
if action["action"] == "attack":
pygame.draw.line(
self.screen,
colors["red"],
np.array(actor["position"]) * self._scale,
np.array(self._get_object_position(action["target"]))
* self._scale,
4,
)
for food in foods:
position = np.array(food["position"]) * self._scale
self._draw_food(position)
now = time()
diff = self._target_frame_duration - (now - self._frame_timer)
if diff < self._target_frame_duration and diff > 0:
sleep(diff)
self._text(f"fps: {self.clock.get_fps():6.2f}", (10, 10))
self._text(f" {diff:.4f} (ms)", (10, 30))
self._frame_timer = time()
pygame.display.flip()
def _get_object_position(self, object_id):
objects = [
self.data["world_state"]["foods"],
self.data["world_state"]["actors"],
self.data["world_state"]["bases"],
]
for obj in itertools.chain.from_iterable(objects):
if obj["id"] == object_id:
return obj["position"]
def _text(self, text, position, antialias=True, color=(220, 230, 225)):
text_surface = self.font.render(text, antialias, color)
self.screen.blit(text_surface, dest=position)
def _draw_actor(self, position, owner_id):
color = self.color_map[owner_id]
pygame.draw.circle(self.screen, color, position, 14, 0)
self.screen.blit(self.actor_sprite, position + np.array([-10, -10]))
def _draw_base(self, base):
position = np.array(base["position"]) * self._scale
color = self.color_map[base["owner_id"]]
pygame.draw.circle(self.screen, color, position, 14, 0)
self.screen.blit(self.base_sprite, position + np.array([-10, -10]))
self._text(
f"food: {base["food"]:.1f}",
position + np.array([-7, -22]),
color=colors["brown3"],
)
def _draw_food(self, position):
self.screen.blit(self.food_sprite, position + np.array([-10, -25]))
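# A minimal driver sketch (the replay file name and format are hypothetical --
# whatever is passed to set_data must match the structure read above):
#   renderer = Renderer()
#   with open('replay.json') as f:
#       renderer.set_data(json.load(f))
#   while True:
#       renderer.update()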
| #!/usr/bin/env python3
import itertools
import sys
from time import sleep, time
import numpy as np
import pygame
from pygame.colordict import THECOLORS as colors
def load_image(name):
image = pygame.image.load(name).convert_alpha()
return image
def get_food_sprite():
image = load_image("./renderer/sprites/food.png")
image = pygame.transform.scale(image, (20, 30))
return image
def get_base_sprite():
image = load_image("./renderer/sprites/base.png")
image = pygame.transform.scale(image, (20, 20))
return image
def get_actor_sprite():
image = load_image("./renderer/sprites/actor.png")
image = pygame.transform.scale(image, (20, 20))
return image
class Renderer:
def __init__(self):
pygame.init()
        self.size = int(600 * 1.5), int(600 * 1.5)
self.screen = pygame.display.set_mode(self.size)
self._data = None
self._current_tick = None
self.clock = pygame.time.Clock()
self.font = pygame.font.Font(pygame.font.get_default_font(), 16)
self.food_sprite = get_food_sprite()
self.actor_sprite = get_actor_sprite()
self.base_sprite = get_base_sprite()
# FIXME: This should be read from the replay file
self._scale = np.array(self.size) / np.array([40, 40])
# Update the game state 30 times per second
self.tick_duration = 1.0 / 30.0
self._target_frame_duration = 1.0 / 60.0
self._frame_timer = time()
self._tick_timer = time()
self.color_map = {}
self.agent_colors = [
colors["cadetblue"],
colors["mediumorchid3"],
colors["yellow3"],
colors["darkolivegreen3"],
]
def set_data(self, data):
self._data = data
self._current_tick = 0
first_data = data[0]
bases = first_data["world_state"]["bases"]
agent_ids = [base["owner_id"] for base in bases]
for index, agent_id in enumerate(agent_ids):
self.color_map[agent_id] = self.agent_colors[index]
def _advance_tick(self):
now = time()
if now - self._tick_timer < self.tick_duration:
return
self._tick_timer = now
self._current_tick += 1
if self._current_tick >= len(self._data):
self._current_tick = 0
@property
def data(self):
return self._data[self._current_tick]
def update(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
self.clock.tick()
self._advance_tick()
self.screen.fill(colors["white"])
world_state = self.data["world_state"]
actions = list(
itertools.chain.from_iterable(
[x["actions"] for x in self.data["agent_actions"]]
)
)
foods = world_state["foods"]
actors = world_state["actors"]
bases = world_state["bases"]
# FIXME: Each agent should have its own color
for base in bases:
self._draw_base(base)
for actor in actors:
position = np.array(actor["position"]) * self._scale
self._draw_actor(position, actor["owner_id"])
for action in actions:
if action.get("actor_id") == actor["id"]:
if action["action"] == "move":
pygame.draw.line(
self.screen,
colors["gray"],
np.array(actor["position"]) * self._scale,
np.array(action["target"]) * self._scale,
2,
)
if action["action"] == "attack":
pygame.draw.line(
self.screen,
colors["red"],
np.array(actor["position"]) * self._scale,
np.array(self._get_object_position(action["target"]))
* self._scale,
4,
)
for food in foods:
position = np.array(food["position"]) * self._scale
self._draw_food(position)
now = time()
diff = self._target_frame_duration - (now - self._frame_timer)
if diff < self._target_frame_duration and diff > 0:
sleep(diff)
self._text(f"fps: {self.clock.get_fps():6.2f}", (10, 10))
self._text(f" {diff:.4f} (ms)", (10, 30))
self._frame_timer = time()
pygame.display.flip()
def _get_object_position(self, object_id):
objects = [
self.data["world_state"]["foods"],
self.data["world_state"]["actors"],
self.data["world_state"]["bases"],
]
for obj in itertools.chain.from_iterable(objects):
if obj["id"] == object_id:
return obj["position"]
def _text(self, text, position, antialias=True, color=(220, 230, 225)):
text_surface = self.font.render(text, antialias, color)
self.screen.blit(text_surface, dest=position)
def _draw_actor(self, position, owner_id):
color = self.color_map[owner_id]
pygame.draw.circle(self.screen, color, position, 14, 0)
self.screen.blit(self.actor_sprite, position + np.array([-10, -10]))
def _draw_base(self, base):
position = np.array(base["position"]) * self._scale
color = self.color_map[base["owner_id"]]
pygame.draw.circle(self.screen, color, position, 14, 0)
self.screen.blit(self.base_sprite, position + np.array([-10, -10]))
self._text(
f"food: {base['food']:.1f}",
position + np.array([-7, -22]),
color=colors["brown3"],
)
def _draw_food(self, position):
self.screen.blit(self.food_sprite, position + np.array([-10, -25]))
|
"""
Coding Bot v4
~~~~~~~~~~~~~~~~~~
This file contains elements that are under the following licenses:
Copyright (c) 2015 Rapptz
license MIT, see
https://github.com/Rapptz/RoboDanny/blob/e1c3c28fe20eb192463f7fc224a399141f0d915d/LICENSE.txt
for more details.
"""
import discord
import time
import asyncio
import datetime
import re
import aiohttp
import asyncpg
import os
import sys
import traceback
import url_parser
import humanize
import inspect
from jishaku.codeblocks import codeblock_converter
from discord.ext import commands, menus
class ClientSession(aiohttp.ClientSession):
def __init__(self, *args, **kwargs):
        default = {
            # 'response_class': ClientResponse,
            'rickroll_queries': ["rickroll", "rick roll", "rick astley", "never gonna give you up"],
            'block': [],
            # Cap total and per-read time to prevent attacks that send a
            # massive payload and lag the client.
            'timeout': aiohttp.ClientTimeout(total=300, sock_read=10)
        }
        default.update(kwargs)
        self.rickroll_regex = re.compile('|'.join(default['rickroll_queries']), re.IGNORECASE)
        self.block_list = default['block']
        del default['rickroll_queries']
        del default['block']
        super().__init__(*args, **default)
async def _request(self, *args, **kwargs):
req = await super()._request(*args, **kwargs)
regex = self.rickroll_regex
        content = (await req.content.read()).decode('utf-8', errors='ignore')
req.rickroll = bool(regex.search(content))
blocked_urls = self.block_list
urls = [str(redirect.url_obj) for redirect in req.history]
req.blocked = bool(await check_links(urls, blocked_urls))
return req
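    # Every response returned by this session therefore carries two extra
    # attributes: ``rickroll`` (the body matched one of the rickroll
    # queries) and ``blocked`` (a URL in the redirect history matched the
    # block list).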
class RedirectMenu(menus.ListPageSource):
def __init__(self, data, ctx, rickroll=False):
grouped = [' \n'.join(data[i:i + 5]) for i in range(0, len(data), 5)]
super().__init__(grouped, per_page=1)
self.ctx = ctx
self.rickroll = rickroll
async def format_page(self, menu, entry):
embed = self.ctx.embed(title='Redirect Checker', description=entry)
embed.set_footer(text=f'Page {menu.current_page + 1}/{menu._source.get_max_pages()} | ' + embed.footer.text, icon_url=embed.footer.icon_url)
if self.rickroll:
embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/814195797380825088/844955986674712646/rick.gif')
return embed
async def check_link_base(url, block_list):
    url = url_parser.get_url(url)._asdict()
    for blocked in block_list:
        # '*' acts as a wildcard: replace it with '-' so url_parser can
        # still parse the pattern, then treat a bare '-' part as
        # "match anything".
        parsed_blocked = url_parser.get_url(
            blocked.replace('*', '-'))._asdict()
        delete = True
        for k in ['sub_domain', 'domain', 'top_domain', 'path']:
            rep = parsed_blocked[k]
            if k == 'path':
                rep = rep[1:]  # drop the leading '/'
            if url[k] != rep and rep.replace('.', '') != '-':
                delete = False
                break
        if delete:
            return True
    return False
async def check_links(urls, block_list):
    for url in urls:
        if await check_link_base(url, block_list):
            return True
    return False
def convert_link(content):
base_regex = r'(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+$'
if re.match(r'^http[s]?://' + base_regex, content):
return content
elif re.match(r'^' + base_regex, content):
return 'https://' + content
else:
raise ValueError('Not a link')
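# Illustrative examples (not exhaustive): convert_link('example.com')
# returns 'https://example.com', convert_link('https://example.com') comes
# back unchanged, and input with characters outside the pattern (e.g.
# spaces) raises ValueError.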
async def check_link(url):
return await check_link_base(url, [ # "*" means any
# [http[s]://][sub.]<name>.<tld>[/path] # Reason
################################################################
'*.grabify.link/*', # Ip Grabber
'*.pornhub.com/*', # Porn
'*.guilded.gg/*', # Advertising
'*.tornadus.net/orange', # Discord Crasher
'giant.gfycat.com/SizzlingScrawnyBudgie.mp4', # Discord Crasher
])
async def find_links(cog, content, channel=None):
    """Scan ``content`` for URLs and follow each redirect chain (10 hops max).

    Returns 1 if a blocked link or invite is found, 2 for a rickroll,
    and None otherwise.
    """
    regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|'
             r'(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    matches = re.findall(regex, content, re.MULTILINE)
    rickroll = False
for link in matches:
location = link
try:
for i in range(10):
if await check_link(location) or await check_invite(cog.bot, location, channel):
return 1
async with cog.session.get(location, allow_redirects=False) as resp:
location = resp.headers.get('Location')
if resp.rickroll:
rickroll = True
                    if location is None or location == str(resp.real_url):
                        break
except Exception as error:
print('Ignoring exception in url filter {}:'.format(content), file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
if rickroll:
return 2
async def filter_links(cog, message):
if ((not isinstance(message.author, discord.Member)) or
message.author.permissions_in(message.channel).manage_messages):
return
checked = await find_links(cog, message.content, message.channel)
if checked == 1:
try:
await message.delete()
except discord.errors.NotFound:
pass
await message.channel.send((
f':warning: {message.author.mention} That link is not '
'allowed :warning:'), delete_after=15)
elif checked == 2:
await message.add_reaction(cog.bot.get_emoji(844957433511542794))
return
async def check_invite(bot, content, channel=None):
content = discord.utils.remove_markdown(content)
pattern = (
r'discord(?:(?:(?:app)?\.com)\/invite|\.gg)/([a-zA-z0-9\-]{2,})\b')
matches = re.findall(pattern, content, re.MULTILINE)
    if channel is not None and channel.id in [
754992725480439809,
801641781028454420,
727029474767667322
]:
return False
if len(matches) > 5:
return True
for code in matches:
try:
invite = await bot.fetch_invite(code)
except discord.errors.NotFound:
invite = None # invite is fine
if invite:
if invite.guild.id not in [
channel.guild.id if channel else None,
681882711945641997, # TCA
782903894468198450, # Swasville
336642139381301249, # Discord.py
267624335836053506, # Python
412754940885467146, # Blurple
613425648685547541, # Discord Developers
]:
return True
return False
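# In short: invites are tolerated in the three exempt channels, for the
# current guild, and for the whitelisted partner servers listed above;
# more than five invite codes in one message, or an invite to any other
# guild, is flagged for deletion.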
async def filter_invite(bot, message):
if ((not isinstance(message.author, discord.Member)) or
message.author.permissions_in(message.channel).manage_messages):
return
matched = await check_invite(bot, message.content, message.channel)
if matched:
await message.delete()
await message.channel.send((
f':warning: {message.author.mention} Invite links are not allowed '
':warning:'), delete_after=15)
return True
def gcd(a, b):
"""
calculate the greatest common divisor of a and b.
"""
while b:
a, b = b, a % b
return a
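# Example: gcd(8, 12) == 4, which lets the math simplify command below
# reduce 8/12 to 2/3.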
class General(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = ClientSession()
@commands.Cog.listener()
async def on_message_edit(self, before, after):
if before.content != after.content: # invoke the command again on edit
if not after.author.bot:
ctx = await self.bot.get_context(
after, cls=self.bot.helpers.Context)
await self.bot.invoke(ctx)
if after.guild:
if after.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, after)
if not invite:
await filter_links(self, after)
@commands.Cog.listener()
async def on_message(self, message):
if message.guild:
if message.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, message)
if not invite:
await filter_links(self, message)
@commands.command(name="source", aliases=["github", "code"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _source(self, ctx, *, command: str = None):
"""Displays my full source code or for a specific command.
To display the source code of a subcommand you can separate it by
periods or spaces.
"""
github = '<:githubwhite:804344724621230091>'
embed = ctx.embed(title=f'{github} GitHub (Click Here) {github}')
source_url = 'https://github.com/The-Coding-Academy/coding-bot-v4'
branch = 'main'
if command is None:
embed.url = source_url
return await ctx.send(embed=embed)
if command == 'help':
src = type(self.bot.help_command)
module = src.__module__
filename = inspect.getsourcefile(src)
else:
obj = self.bot.get_command(command.replace('.', ' '))
if obj is None:
return await ctx.send(embed=ctx.error('Could not find command.'))
src = obj.callback.__code__
module = obj.callback.__module__
filename = src.co_filename
lines, firstlineno = inspect.getsourcelines(src)
if not module.startswith('discord'):
# not a built-in command
location = os.path.relpath(filename).replace('\\', '/')
else:
location = module.replace('.', '/') + '.py'
source_url = 'https://github.com/Rapptz/discord.py'
branch = 'master'
final_url = (f'{source_url}/blob/{branch}/{location}#L{firstlineno}-L'
f'{firstlineno + len(lines) - 1}')
embed.url = final_url
await ctx.send(embed=embed)
@commands.command(name="mystbin", aliases=["mb"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _mystbin(self, ctx, *, code: codeblock_converter = None):
"""Send your code to Mystb.in. You may use codeblocks if you want,
or use code from inside a file."""
code = code.content if code else None
attachments = None
if len(ctx.message.attachments) != 0:
attachments = ctx.message.attachments
elif ctx.message.reference:
message = await ctx.channel.fetch_message(
ctx.message.reference.message_id)
attachments = message.attachments
if attachments:
for attachment in attachments:
code = await attachment.read()
if not code:
return await ctx.send(embed=ctx.error((
'Please either provide code in the command, attach a file, or '
'react to a message that contains a file.')))
async with self.bot.http._HTTPClient__session.post(
'https://mystb.in/documents', data=code) as r:
res = await r.json()
key = res["key"]
embed = ctx.embed(title="Mystb.in Link", description=(
'I pasted your code into a bin, click on the title access it!'),
url=f'https://mystb.in/{key}')
embed.set_thumbnail(url=(
'https://cdn.discordapp.com/avatars/569566608817782824/'
'14f120e096fb515d770eea38f9cddd88.png'))
await ctx.send(embed=embed)
@commands.command(name='ping')
async def _ping(self, ctx):
loading = '<a:DiscordSpin:795546311319355393>'
ws_ping = f'{(self.bot.latency * 1000):.2f}ms ' \
f'({humanize.precisedelta(datetime.timedelta(seconds=self.bot.latency))})'
embed = ctx.embed(title='PONG! :ping_pong:', description=(
f'**{loading} Websocket:** {ws_ping}\n**'
':repeat: Round-Trip:** Calculating...\n**:elephant: Database:** '
'Calculating...'))
start = time.perf_counter()
message = await ctx.send(embed=embed)
end = time.perf_counter()
await asyncio.sleep(0.5)
trip = end - start
rt_ping = f'{(trip * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=trip))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n**'
f':repeat: Round-Trip:** {rt_ping}\n**:elephant: '
'Database:** Calculating...')
await message.edit(embed=embed)
await asyncio.sleep(0.5)
start = time.perf_counter()
try:
async with self.bot.pools.config.acquire() as connection:
await connection.fetchval(
'SELECT prefixes FROM serverconf WHERE id = 0')
end = time.perf_counter()
database = end - start
db_ping = f'{(database * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=database))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n'
f'**:repeat: Round-Trip:** {rt_ping}\n**:elephant:'
f' Database:** {db_ping}')
except asyncpg.exceptions._base.InterfaceError:
embed.description = (
f'**{loading} Websocket:** {ws_ping}'
f'\n**:repeat: Round-Trip:** {rt_ping}\n**'
':elephant: Database:** *Did not respond!*')
await message.edit(embed=embed)
@commands.command(name='revive', aliases=['revivechat', 'chatrevive',
'revchat', 'chatrev'])
@commands.guild_only()
@commands.cooldown(1, 1800, commands.BucketType.guild)
@commands.has_any_role(729530191109554237, 795136568805294097,
725899526350831616) # Senior Mod +
async def _revive(self, ctx):
mention = ctx.guild.get_role(759219083639783448).mention
embed = ctx.embed(
title='Revive Chat Ping!',
description='Come back to chat and make it alive again!')
await ctx.send(content=mention, embed=embed)
@commands.command(name='reinvoke', aliases=['re'])
async def _reinvoke(self, ctx):
"""
Reinvoke a command, running it again. This does NOT bypass any permissions checks
"""
        try:
            message = await ctx.channel.fetch_message(ctx.message.reference.message_id)
        except (AttributeError, discord.errors.NotFound):
            # Either the command was not a reply, or the message is gone.
            return await ctx.send(embed=ctx.error('I couldn\'t find that message'))
if message.author == ctx.author:
await ctx.message.add_reaction('\U00002705')
context = await self.bot.get_context(
message, cls=self.bot.helpers.Context)
await self.bot.invoke(context)
else:
await ctx.send(embed=ctx.error('That isn\'t your message'))
@commands.command(name="joined")
async def _joined(self, ctx, position: int):
async with ctx.typing():
if position > ctx.guild.member_count:
return await ctx.send(embed=ctx.error('There are not that many members here'))
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
            def ordinal(n):
                return str(n) + ("th" if 4 <= n % 100 <= 20 else {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th"))
            embed = ctx.embed(title=f"The {ordinal(position)} person to join is: ", description=all_members[position - 1].mention)
await ctx.send(embed=embed)
@commands.command(name="joinposition", aliases=['joinpos'])
async def _join_position(self, ctx, member: discord.Member):
async with ctx.typing():
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
            def ordinal(n):
                return str(n) + ("th" if 4 <= n % 100 <= 20 else {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th"))
            embed = ctx.embed(title="Member info", description=f'{member.mention} was the {ordinal(all_members.index(member) + 1)} person to join')
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
async def math(self, ctx):
await ctx.send_help('math')
@math.command(name='simplify')
async def _math_simplify(self, ctx, fraction):
        try:
            numerator, denominator = (int(x) for x in fraction.split('/'))
        except ValueError:
            return await ctx.send_error('Not a fraction')
        if denominator == 0:
            return await ctx.send_error("Division by 0")
        common_divisor = gcd(numerator, denominator)
        reduced_numerator = numerator // common_divisor
        reduced_denominator = denominator // common_divisor
        if reduced_denominator == 1:
            final = str(reduced_numerator)
        elif common_divisor == 1:
            final = f'{numerator}/{denominator}'
        else:
            final = f'{reduced_numerator}/{reduced_denominator}'
await ctx.send(embed=ctx.embed(title='Reduced Fraction', description=final))
@commands.command(name='redirects', aliases=['checklink'])
async def _redirects(self, ctx, url: convert_link):
hl = []
status_map = {
1: '\U0001f504',
2: '\U00002705',
3: '\U000027a1',
4: '\U0000274c',
5: '\U000026a0'
}
def build_string(res):
            return f'{status_map[res.status // 100]} [{(res.url_obj.host + res.url_obj.path).strip("/")}]({res.url_obj}) ({res.status} {res.reason})'
rickroll = False
try:
async with ctx.typing():
r = await self.session.get(url)
for res in r.history:
hl.append(build_string(res))
hl.append(build_string(r))
rickroll = r.rickroll
        except Exception:
return await ctx.send_error(f'Could not reach "{url}"')
pages = menus.MenuPages(source=RedirectMenu(hl, ctx, rickroll=rickroll), delete_message_after=True)
await pages.start(ctx)
def setup(bot):
bot.add_cog(General(bot))
| """
Coding Bot v4
~~~~~~~~~~~~~~~~~~
This file contains elements that are under the following licenses:
Copyright (c) 2015 Rapptz
license MIT, see
https://github.com/Rapptz/RoboDanny/blob/e1c3c28fe20eb192463f7fc224a399141f0d915d/LICENSE.txt
for more details.
"""
import discord
import time
import asyncio
import datetime
import re
import aiohttp
import asyncpg
import os
import sys
import traceback
import url_parser
import humanize
import inspect
from jishaku.codeblocks import codeblock_converter
from discord.ext import commands, menus
class ClientSession(aiohttp.ClientSession):
def __init__(self, *args, **kwargs):
try:
default = {
# 'response_class': ClientResponse,
'rickroll_queries': ["rickroll","rick roll","rick astley","never gonna give you up"],
'block': [],
'timeout': aiohttp.ClientTimeout(total=300, sock_read=10) # to prevent attacks relating to sending massive payload and lagging the client
}
default.update(kwargs)
self.rickroll_regex = re.compile('|'.join(default['rickroll_queries']), re.IGNORECASE)
self.block_list = default['block']
del default['rickroll_queries']
del default['block']
super().__init__(*args, **default)
except:
raise
super().__init__(*args, **kwargs)
async def _request(self, *args, **kwargs):
req = await super()._request(*args, **kwargs)
regex = self.rickroll_regex
content = str(await req.content.read())
req.rickroll = bool(regex.search(content))
blocked_urls = self.block_list
urls = [str(redirect.url_obj) for redirect in req.history]
req.blocked = bool(await check_links(urls, blocked_urls))
return req
class RedirectMenu(menus.ListPageSource):
def __init__(self, data, ctx, rickroll=False):
grouped = [' \n'.join(data[i:i + 5]) for i in range(0, len(data), 5)]
super().__init__(grouped, per_page=1)
self.ctx = ctx
self.rickroll = rickroll
async def format_page(self, menu, entry):
embed = self.ctx.embed(title='Redirect Checker', description=entry)
embed.set_footer(text=f'Page {menu.current_page + 1}/{menu._source.get_max_pages()} | ' + embed.footer.text, icon_url=embed.footer.icon_url)
if self.rickroll:
embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/814195797380825088/844955986674712646/rick.gif')
return embed
async def check_link_base(url, block_list):
url = url_parser.get_url(url)._asdict()
for blocked in block_list:
parsed_blocked = url_parser.get_url(
blocked.replace('*', '-'))._asdict()
delete = True
for k in ['sub_domain', 'domain', 'top_domain', 'path']:
rep = parsed_blocked[k]
if k == 'path':
rep = rep[1:]
if url[k] != rep and rep.replace('.','') != '-':
delete = False
break
if delete:
return True
async def check_links(urls, block_list):
for url in urls:
if await check_link_base(url, block_list):
return True
def convert_link(content):
base_regex = r'(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+$'
if re.match(r'^http[s]?://' + base_regex, content):
return content
elif re.match(r'^' + base_regex, content):
return 'https://' + content
else:
raise ValueError('Not a link')
async def check_link(url):
return await check_link_base(url, [ # "*" means any
# [http[s]://][sub.]<name>.<tld>[/path] # Reason
################################################################
'*.grabify.link/*', # Ip Grabber
'*.pornhub.com/*', # Porn
'*.guilded.gg/*', # Advertising
'*.tornadus.net/orange', # Discord Crasher
'giant.gfycat.com/SizzlingScrawnyBudgie.mp4', # Discord Crasher
])
async def find_links(cog, content, channel=None):
regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|'
r'(?:%[0-9a-fA-F][0-9a-fA-F]))+')
matches = re.findall(regex, content, re.MULTILINE)
urls = []
rickroll = False
for link in matches:
location = link
try:
for i in range(10):
if await check_link(location) or await check_invite(cog.bot, location, channel):
return 1
async with cog.session.get(location, allow_redirects=False) as resp:
location = resp.headers.get('Location')
if resp.rickroll:
rickroll = True
if location == resp.real_url or location is None:
break
except Exception as error:
print('Ignoring exception in url filter {}:'.format(content), file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
if rickroll:
return 2
async def filter_links(cog, message):
if ((not isinstance(message.author, discord.Member)) or
message.author.permissions_in(message.channel).manage_messages):
return
checked = await find_links(cog, message.content, message.channel)
if checked == 1:
try:
await message.delete()
except discord.errors.NotFound:
pass
await message.channel.send((
f':warning: {message.author.mention} That link is not '
'allowed :warning:'), delete_after=15)
elif checked == 2:
await message.add_reaction(cog.bot.get_emoji(844957433511542794))
return
async def check_invite(bot, content, channel=None):
content = discord.utils.remove_markdown(content)
pattern = (
r'discord(?:(?:(?:app)?\.com)\/invite|\.gg)/([a-zA-z0-9\-]{2,})\b')
matches = re.findall(pattern, content, re.MULTILINE)
if channel.id in [
754992725480439809,
801641781028454420,
727029474767667322
]:
return False
if len(matches) > 5:
return True
for code in matches:
try:
invite = await bot.fetch_invite(code)
except discord.errors.NotFound:
invite = None # invite is fine
if invite:
if invite.guild.id not in [
channel.guild.id if channel else None,
681882711945641997, # TCA
782903894468198450, # Swasville
336642139381301249, # Discord.py
267624335836053506, # Python
412754940885467146, # Blurple
613425648685547541, # Discord Developers
]:
return True
return False
async def filter_invite(bot, message=None, content=None):
if ((not isinstance(message.author, discord.Member)) or
message.author.permissions_in(message.channel).manage_messages):
return
matched = await check_invite(bot, message.content, message.channel)
if matched:
await message.delete()
await message.channel.send((
f':warning: {message.author.mention} Invite links are not allowed '
':warning:'), delete_after=15)
return True
def gcd(a, b):
"""
calculate the greatest common divisor of a and b.
"""
while b:
a, b = b, a % b
return a
class General(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = ClientSession()
@commands.Cog.listener()
async def on_message_edit(self, before, after):
if before.content != after.content: # invoke the command again on edit
if not after.author.bot:
ctx = await self.bot.get_context(
after, cls=self.bot.helpers.Context)
await self.bot.invoke(ctx)
if after.guild:
if after.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, after)
if not invite:
await filter_links(self, after)
@commands.Cog.listener()
async def on_message(self, message):
if message.guild:
if message.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, message)
if not invite:
await filter_links(self, message)
@commands.command(name="source", aliases=["github", "code"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _source(self, ctx, *, command: str = None):
"""Displays my full source code or for a specific command.
To display the source code of a subcommand you can separate it by
periods or spaces.
"""
github = '<:githubwhite:804344724621230091>'
embed = ctx.embed(title=f'{github} GitHub (Click Here) {github}')
source_url = 'https://github.com/The-Coding-Academy/coding-bot-v4'
branch = 'main'
if command is None:
embed.url = source_url
return await ctx.send(embed=embed)
if command == 'help':
src = type(self.bot.help_command)
module = src.__module__
filename = inspect.getsourcefile(src)
else:
obj = self.bot.get_command(command.replace('.', ' '))
if obj is None:
return await ctx.send(embed=ctx.error('Could not find command.'))
src = obj.callback.__code__
module = obj.callback.__module__
filename = src.co_filename
lines, firstlineno = inspect.getsourcelines(src)
if not module.startswith('discord'):
# not a built-in command
location = os.path.relpath(filename).replace('\\', '/')
else:
location = module.replace('.', '/') + '.py'
source_url = 'https://github.com/Rapptz/discord.py'
branch = 'master'
final_url = (f'{source_url}/blob/{branch}/{location}#L{firstlineno}-L'
f'{firstlineno + len(lines) - 1}')
embed.url = final_url
await ctx.send(embed=embed)
@commands.command(name="mystbin", aliases=["mb"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _mystbin(self, ctx, *, code: codeblock_converter = None):
"""Send your code to Mystb.in. You may use codeblocks if you want,
or use code from inside a file."""
code = code.content if code else None
attachments = None
if len(ctx.message.attachments) != 0:
attachments = ctx.message.attachments
elif ctx.message.reference:
message = await ctx.channel.fetch_message(
ctx.message.reference.message_id)
attachments = message.attachments
if attachments:
for attachment in attachments:
code = await attachment.read()
if not code:
return await ctx.send(embed=ctx.error((
'Please either provide code in the command, attach a file, or '
'react to a message that contains a file.')))
async with self.bot.http._HTTPClient__session.post(
'https://mystb.in/documents', data=code) as r:
res = await r.json()
key = res["key"]
embed = ctx.embed(title="Mystb.in Link", description=(
'I pasted your code into a bin, click on the title access it!'),
url=f'https://mystb.in/{key}')
embed.set_thumbnail(url=(
'https://cdn.discordapp.com/avatars/569566608817782824/'
'14f120e096fb515d770eea38f9cddd88.png'))
await ctx.send(embed=embed)
@commands.command(name='ping')
async def _ping(self, ctx):
loading = '<a:DiscordSpin:795546311319355393>'
ws_ping = f'{(self.bot.latency * 1000):.2f}ms ' \
f'({humanize.precisedelta(datetime.timedelta(seconds=self.bot.latency))})'
embed = ctx.embed(title='PONG! :ping_pong:', description=(
f'**{loading} Websocket:** {ws_ping}\n**'
':repeat: Round-Trip:** Calculating...\n**:elephant: Database:** '
'Calculating...'))
start = time.perf_counter()
message = await ctx.send(embed=embed)
end = time.perf_counter()
await asyncio.sleep(0.5)
trip = end - start
rt_ping = f'{(trip * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=trip))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n**'
f':repeat: Round-Trip:** {rt_ping}\n**:elephant: '
'Database:** Calculating...')
await message.edit(embed=embed)
await asyncio.sleep(0.5)
start = time.perf_counter()
try:
async with self.bot.pools.config.acquire() as connection:
await connection.fetchval(
'SELECT prefixes FROM serverconf WHERE id = 0')
end = time.perf_counter()
database = end - start
db_ping = f'{(database * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=database))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n'
f'**:repeat: Round-Trip:** {rt_ping}\n**:elephant:'
f' Database:** {db_ping}')
except asyncpg.exceptions._base.InterfaceError:
embed.description = (
f'**{loading} Websocket:** {ws_ping}'
f'\n**:repeat: Round-Trip:** {rt_ping}\n**'
':elephant: Database:** *Did not respond!*')
await message.edit(embed=embed)
@commands.command(name='revive', aliases=['revivechat', 'chatrevive',
'revchat', 'chatrev'])
@commands.guild_only()
@commands.cooldown(1, 1800, commands.BucketType.guild)
@commands.has_any_role(729530191109554237, 795136568805294097,
725899526350831616) # Senior Mod +
async def _revive(self, ctx):
mention = ctx.guild.get_role(759219083639783448).mention
embed = ctx.embed(
title='Revive Chat Ping!',
description='Come back to chat and make it alive again!')
await ctx.send(content=mention, embed=embed)
@commands.command(name='reinvoke', aliases=['re'])
async def _reinvoke(self, ctx):
"""
Reinvoke a command, running it again. This does NOT bypass any permissions checks
"""
try:
message = await ctx.channel.fetch_message(ctx.message.reference.message_id)
except discord.errors.NotFound:
return await ctx.send(embed=ctx.error('I couldn\'t find that message'))
if message.author == ctx.author:
await ctx.message.add_reaction('\U00002705')
context = await self.bot.get_context(
message, cls=self.bot.helpers.Context)
await self.bot.invoke(context)
else:
await ctx.send(embed=ctx.error('That isn\'t your message'))
@commands.command(name="joined")
async def _joined(self, ctx, position: int):
async with ctx.typing():
if position > ctx.guild.member_count:
return await ctx.send(embed=ctx.error('There are not that many members here'))
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
def ord(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
embed = ctx.embed(title = f"The {ord(position)} person to join is: ", description=all_members[position - 1].mention)
await ctx.send(embed=embed)
@commands.command(name="joinposition", aliases=['joinpos'])
async def _join_position(self, ctx, member: discord.Member):
async with ctx.typing():
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
def ord(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
embed = ctx.embed(title = "Member info", description = f'{member.mention} was the {ord(all_members.index(member) + 1)} person to join')
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
async def math(self, ctx):
await ctx.send_help('math')
@math.command(name='simplify')
async def _math_simplify(self, ctx, fraction):
try:
numerator, denominator = (int(x) for x in fraction.split('/'))
except:
return await ctx.send_error('Not a fraction')
if denominator == 0:
return await ctx.send_error("Division by 0")
common_divisor = gcd(numerator, denominator)
(reduced_numerator, reduced_denominator) = (numerator / common_divisor, denominator / common_divisor)
if reduced_denominator == 1:
final = int(reduced_numerator)
elif common_divisor == 1:
final = f'{int(numerator)}/{int(denominator)}'
else:
final = f'{int(reduced_numerator)}/{int(reduced_denominator)}'
await ctx.send(embed=ctx.embed(title='Reduced Fraction', description=final))
@commands.command(name='redirects', aliases=['checklink'])
async def _redirects(self, ctx, url: convert_link):
hl = []
status_map = {
1: '\U0001f504',
2: '\U00002705',
3: '\U000027a1',
4: '\U0000274c',
5: '\U000026a0'
}
def build_string(res):
return f'{status_map[int(res.status / 100)]} [{(res.url_obj.host + res.url_obj.path).strip("/")}]({res.url_obj}) ({res.status} {res.reason})'
rickroll = False
try:
async with ctx.typing():
r = await self.session.get(url)
for res in r.history:
hl.append(build_string(res))
hl.append(build_string(r))
rickroll = r.rickroll
except:
return await ctx.send_error(f'Could not reach "{url}"')
pages = menus.MenuPages(source=RedirectMenu(hl, ctx, rickroll=rickroll), delete_message_after=True)
await pages.start(ctx)
def setup(bot):
bot.add_cog(General(bot))
|
#!/usr/bin/python3
import json
import os
import copy
import re
from collections import defaultdict
from distutils.version import LooseVersion
from pathlib import Path
from typing import Any, Dict, List, Union, Optional
COLOR_VALUES = [
'#10a100',
'#7ead14',
'#bab73c',
'#e8c268',
'#e59838',
'#e36717',
'#de1414',
]
COLOR_NAN = '#9b00a1'
NOT_A_NUMBER = 'N/A'
STD_MEAN_THRESHOLD = 0.5
COLOR_LEGEND = ' | '.join(
[
f'<span style="color:{color};">{i*10} - {(i+1)*10}%</span>'
for i, color in enumerate(COLOR_VALUES)
]
)
LEGEND = f"""
The following data should be read as follows:
- Cell colors show how far a value lies above the column minimum, in percent:\n
{COLOR_LEGEND}
- <s>1337</s>: unstable tests with "standard deviation / mean > {STD_MEAN_THRESHOLD}"
"""
def _format(data: Union[int, float]) -> Any:
if isinstance(data, bool):
return str(data)
    elif isinstance(data, (int, float)):
if data >= 1000:
_data = data
i = 0
while abs(_data) >= 1000:
i += 1
_data /= 1000
if isinstance(data, int):
return '%d%s' % (_data, ['', 'K', 'M', 'G', 'T', 'P'][i])
else:
return '%.2f%s' % (_data, ['', 'K', 'M', 'G', 'T', 'P'][i])
else:
i = 1
_data = round(data, i)
while _data == 0 and i <= 5:
i += 1
_data = round(data, i)
return _data
else:
return data
def _get_color(mean_time, master_mean_time):
if mean_time is None or mean_time == NOT_A_NUMBER or master_mean_time == 0:
return COLOR_NAN
raw_bucket = int((float(mean_time) / float(master_mean_time) - 1) * 10)
bucket = max(0, min(6, raw_bucket))
return COLOR_VALUES[bucket]
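# Example: a value 35% above the column minimum gives
# int((1.35 - 1) * 10) == 3, i.e. the fourth entry of COLOR_VALUES;
# anything 70% or more above the minimum is clamped to the last (red)
# bucket.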
def _get_cleaned_mean_time(time: Optional[int], scaling: int) -> str:
"""Return cleaned data"""
if time is not None:
return str(int(int(time) / scaling))
else:
return NOT_A_NUMBER
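# Example: _get_cleaned_mean_time(25_000_000, 1_000_000) == '25', while a
# missing measurement (None) is rendered as 'N/A'.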
def _cleaned_title(raw_heading: str) -> str:
"""Return cleaned title of artifact name."""
return raw_heading.replace('test_', '').replace('_', ' ').title()
def is_test_unstable(run_stats):
mean = run_stats.get('mean_time', 1e20)
return mean != 0 and run_stats.get('std_time', 0.0) / mean > STD_MEAN_THRESHOLD
def _get_table_header(raw_data: List[Dict[str, Any]]) -> str:
    """Return the metadata table header as a Markdown string."""
    titles = {}
    # Column names are taken from the first run's metadata.
    for test_run in raw_data:
        for name in test_run.get('metadata', {}):
            titles[name] = []
        break
separators = []
for result in raw_data:
separators.append('---:')
for field in titles:
if 'metadata' in result:
value = result['metadata'].get(field, 'N/A')
titles[field].append(f'**{value}**')
else:
titles[field].append('**N/A**')
final = []
    for title, values in titles.items():
        final.append(f"| **{title}** | {' | '.join(values)} |\n")
    header = f"{final[0]}| :---: | {' | '.join(separators)} |\n{''.join(final[1:])}"
return header
def _get_version_list(artifacts_dir: str) -> List[str]:
"""Generates sorted list of all versions found in reports.
Args:
artifacts_dir: Absolute path to artifact directory.
Return: List of versions found in reports.
"""
lv = []
for folder in os.listdir(artifacts_dir):
if os.path.isfile(os.path.join(artifacts_dir, folder, 'report.json')):
lv.append(LooseVersion(folder))
lv.sort()
sorted_dev = [v.vstring for v in lv]
    p = re.compile(r'dev\d+$')
i = 0
while i + 1 < len(sorted_dev):
tmp = sorted_dev[i]
m = p.search(sorted_dev[i + 1])
if m and sorted_dev[i + 1].startswith(tmp):
sorted_dev[i] = sorted_dev[i + 1]
sorted_dev[i + 1] = tmp
i += 1
version_list = [sorted_dev[i - 1] for i in range(len(sorted_dev), 0, -1)]
return version_list
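# The swap above moves 'devN' builds in front of the matching release in
# the ascending order, so that after the final reversal a release is
# listed before its own dev builds in the newest-first output.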
def _get_cum_data(version_list: List[str], artifacts_dir: str) -> Dict[Any, Any]:
"""Generates cumulative data and return in a dict.
Args:
version_list: List of versions found in reports.
artifacts_dir: Absolute path to artifact directory.
Return: Dict of cumulative data
"""
data: Dict[Any, Any] = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
)
for version in version_list:
report_file = os.path.join(artifacts_dir, version, 'report.json')
searchers_compare_file = os.path.join(
artifacts_dir, version, 'searchers_compare.json'
)
if os.path.isfile(report_file):
with open(report_file) as fp:
_raw_data = json.load(fp)
if os.path.isfile(searchers_compare_file):
with open(searchers_compare_file) as fp:
_raw_data.extend(json.load(fp))
for i in _raw_data:
page = i.get('page', 'unsorted_tests')
test_name = i['name']
metadata_hash = _hash_run(i)
data[page][test_name][version][metadata_hash] = i
return data
def generate_homepage(output_dir: str) -> None:
"""This generate required homepage for the website.
Args:
output_dir: Absolute path to Hugo content directory.
"""
src = os.path.join(os.getcwd(), 'README.md')
dst = os.path.join(output_dir, '_index.md')
Path(output_dir).mkdir(parents=True, exist_ok=True)
if os.path.isfile(src):
with open(src) as f:
data = f.read()
with open(dst, 'w') as fp:
fp.write('---\n')
fp.write('title: Benchmark Jina\n')
fp.write('type: docs\n')
fp.write('---\n')
fp.write(data)
def _hash_run(d):
tmp_dict = copy.deepcopy(d)
tmp_dict.pop('mean_time', None)
tmp_dict.pop('std_time', None)
tmp_dict.pop('iterations', None)
tmp_dict.pop('results', None)
return json.dumps(tmp_dict, sort_keys=True)
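# Two runs therefore share a hash exactly when everything except the
# measured numbers (mean/std time, iterations, results) is identical;
# this is how the same parameter combination is matched across versions.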
def _get_stats(test_data, latest_version):
results = defaultdict(dict)
for version, test_results in test_data.items():
for test_result in test_results.values():
parameter_hash = _hash_run(test_result)
metadata = test_result.get('metadata', {})
if not metadata:
metadata = {'name': test_result['name']}
results[parameter_hash]['metadata'] = metadata
results[parameter_hash]['min'] = min(
results[parameter_hash].get('min', 1e20), test_result['mean_time']
)
results[parameter_hash]['max'] = max(
results[parameter_hash].get('max', 0), test_result['mean_time']
)
results[parameter_hash]['parameter_hash'] = parameter_hash
if version == latest_version:
results[parameter_hash]['last_version_mean'] = test_result['mean_time']
stats = list(results.values())
_add_scaling(stats)
return stats
def _get_one_version_stats(test_results):
    results = defaultdict(lambda: 1e20)  # missing keys start at a huge sentinel
results['min_mean_docs_per_sec'] = 0
for test in test_results:
results['min_time'] = min(results['min_time'], test['mean_time'])
results['min_memory'] = min(results['min_memory'], test['mean_memory'])
results['min_indexer_memory'] = min(
results['min_indexer_memory'], test['mean_indexer_memory']
)
results['min_mean_docs_per_sec'] = max(
results['min_mean_docs_per_sec'], test['mean_mean_docs_per_sec']
)
results['min_latency'] = min(results['min_latency'], test['mean_latency'])
return results
def _add_scaling(stats):
    for run_stats in stats:
        if run_stats['min'] > 10_000_000_000:
            run_stats['scaling'] = 1_000_000_000
            run_stats['metadata']['unit'] = 's'
        elif run_stats['min'] > 10_000_000:
            run_stats['scaling'] = 1_000_000
            run_stats['metadata']['unit'] = 'ms'
        elif run_stats['min'] > 10_000:
            run_stats['scaling'] = 1_000
            run_stats['metadata']['unit'] = 'μs'
        else:
            run_stats['scaling'] = 1
            run_stats['metadata']['unit'] = 'ns'
        run_stats['min'] = int(run_stats['min'] / run_stats['scaling'])
        run_stats['max'] = int(run_stats['max'] / run_stats['scaling'])
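# Example (assuming the raw values are nanoseconds): a column whose
# fastest run took 25_000_000 ns gets scaling == 1_000_000, so the table
# reports it as 25 with unit 'ms'.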
def generate_docs(
version_list: List[str], cum_data: Dict[Any, Any], output_dir: str
) -> None:
"""This generate required docs from artifacts.
Args:
version_list: List of versions found in reports.
cum_data: Cumulative data in Dict.
output_dir: Absolute path to Hugo docs directory.
"""
Path(output_dir).mkdir(parents=True, exist_ok=True)
for page, page_data in cum_data.items():
output_file = os.path.join(output_dir, f'{page}.md')
if page == 'indexer_comparison':
generate_comparison_test(page_data, output_file, _cleaned_title(page))
else:
generate_versioned_test(page_data, output_file, _cleaned_title(page))
def _get_last_version(single_test_data):
versions = list(single_test_data.keys())
if versions:
return max(versions)
else:
return None
def generate_versioned_test(page_data, output_file, title):
with open(output_file, 'w') as fp:
fp.write('---\n')
fp.write(f'title: {title}\n')
fp.write('---\n')
fp.write(f'# {title}\n\n')
fp.write(f'{LEGEND}\n')
for test_name, single_test_data in page_data.items():
latest_version = _get_last_version(single_test_data)
            if latest_version is None:
                continue
stats = _get_stats(single_test_data, latest_version)
header = _get_table_header(stats)
fp.write(f'## {_cleaned_title(test_name)}\n')
fp.write(header)
for version, data_dict in single_test_data.items():
fp.write(f'| {version} |')
for run in stats:
                    run_data = data_dict.get(run['parameter_hash'], {})
mean_time = _get_cleaned_mean_time(
run_data.get('mean_time', None), run['scaling']
)
color = _get_color(mean_time, run['min'])
if is_test_unstable(run_data):
mean_time = f'<s>{mean_time}</s>'
fp.write(f' <span style="color:{color};">{mean_time}</span> |')
fp.write('\n')
fp.write('\n')
def generate_comparison_test(page_data, output_file, title):
with open(output_file, 'w') as fp:
fp.write('---\n')
fp.write(f'title: {title}\n')
fp.write('---\n')
fp.write(f'# {title}\n\n')
for test_name, single_test_data in page_data.items():
latest_version = _get_last_version(single_test_data)
if latest_version is None:
continue
table = []
test_data = single_test_data[latest_version]
header = _get_table_header(list(test_data.values()))
fp.write(f'## {_cleaned_title(test_name)}\n')
fp.write(f'Tests were performed against Jina {latest_version}.\n\n')
fp.write(header)
table.append(
[
'index time in ms',
'search time in ms',
'index memory',
'search memory',
'p90 in ms',
'p99 in ms',
'RPS',
'Documents per second',
]
)
for run in test_data.values():
table.append(
[
_get_cleaned_mean_time(run['results']['mean_index_time'], 1e6),
_get_cleaned_mean_time(run['results']['mean_search_time'], 1e6),
                        get_readable_size(run['results']['mean_index_memory']),
                        get_readable_size(run['results']['mean_search_memory']),
_get_cleaned_mean_time(run['results']['p90'], 1e6),
_get_cleaned_mean_time(run['results']['p99'], 1e6),
get_rps(run),
get_dps(run),
]
)
transposed = list(map(list, zip(*table)))
fp.write('|\n|'.join(' | '.join(row) for row in transposed))
fp.write('\n\n')
def get_dps(run):
total_docs = run['metadata']['docs_per_request'] * run['metadata']['num_requests']
dps = total_docs / (run['results']['mean_search_time'] / 1e9)
return f'{dps:.2f}'
def get_rps(run):
rps = run['metadata']['num_requests'] / (run['results']['mean_search_time'] / 1e9)
return f'{rps:.2f}'
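# Example: 100 requests with a mean search time of 2e9 ns (2 s) yields
# get_rps(...) == '50.00'.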
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
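# Example: get_readable_size(20 * 1024 ** 2) == '20.0 MB' and
# get_readable_size(512) == '512 Bytes'.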
def generate_menus(cum_data: Dict[Any, Any], output_dir: str) -> None:
"""This generate required menus from artifacts.
Args:
cum_data: Cumulative data in Dict.
output_dir: Absolute path to Hugo menus directory.
"""
menu_dir = os.path.join(output_dir, 'menu')
menu_index = os.path.join(menu_dir, 'index.md')
Path(menu_dir).mkdir(parents=True, exist_ok=True)
with open(menu_index, 'w') as fp:
fp.write('---\n')
fp.write('headless: true\n')
fp.write('---\n\n')
for page in cum_data:
fp.write(
'- [%s]({{< relref "/docs/%s.md" >}})\n' % (_cleaned_title(page), page)
)
def main():
"""This is the main function to call."""
base_dir = os.path.join(os.getcwd(), 'docs')
content_dir = os.path.join(base_dir, 'content')
docs_dir = os.path.join(content_dir, 'docs')
artifacts_dir = os.path.join(base_dir, 'static/artifacts')
version_list = _get_version_list(artifacts_dir)
cum_data = _get_cum_data(version_list, artifacts_dir)
generate_homepage(content_dir)
generate_docs(version_list, cum_data, docs_dir)
generate_menus(cum_data, content_dir)
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's a code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.
import ast
import os
import re
import types
import decimal
import unittest
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
        for error_string in error_strings:
            with self.subTest(error_string=error_string):
                with self.assertRaisesRegex(exception_type, regex):
                    eval(error_string)
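    # assertAllRaise evals every string and asserts it raises
    # exception_type with a message matching ``regex``; subTest keeps the
    # run going so one bad string does not mask failures in the rest.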
def test__format__lookup(self):
# Make sure __format__ is looked up on the type, not the instance.
class X:
def __format__(self, spec):
return 'class'
x = X()
# Add a bound __format__ method to the 'y' instance, but not
# the 'x' instance.
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
# __format__ is not called this way, but still make sure it
# returns what we expect (so we can make sure we're bypassing
# it).
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
# This is how __format__ is actually called.
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
# Inspired by http://bugs.python.org/issue24975
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
# Make sure x was not called.
self.assertFalse(x.called)
# Actually run the code.
exec(c)
# Make sure x was called.
self.assertTrue(x.called)
def test_ast_line_numbers(self):
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiple_formattedvalues(self):
expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `f'no formatted values'`
self.assertEqual(type(t.body[0]), ast.Expr)
self.assertEqual(type(t.body[0].value), ast.JoinedStr)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 4)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
# check the first binop location
binop1 = t.body[1].value.values[1].value
self.assertEqual(type(binop1), ast.BinOp)
self.assertEqual(type(binop1.left), ast.Name)
self.assertEqual(type(binop1.op), ast.Mult)
self.assertEqual(type(binop1.right), ast.Call)
self.assertEqual(binop1.lineno, 3)
self.assertEqual(binop1.left.lineno, 3)
self.assertEqual(binop1.right.lineno, 3)
self.assertEqual(binop1.col_offset, 8)
self.assertEqual(binop1.left.col_offset, 8)
self.assertEqual(binop1.right.col_offset, 12)
# check the second binop location
binop2 = t.body[1].value.values[3].value
self.assertEqual(type(binop2), ast.BinOp)
self.assertEqual(type(binop2.left), ast.Name)
self.assertEqual(type(binop2.op), ast.Add)
self.assertEqual(type(binop2.right), ast.Call)
self.assertEqual(binop2.lineno, 3)
self.assertEqual(binop2.left.lineno, 3)
self.assertEqual(binop2.right.lineno, 3)
self.assertEqual(binop2.col_offset, 23)
self.assertEqual(binop2.left.col_offset, 23)
self.assertEqual(binop2.right.col_offset, 27)
def test_ast_line_numbers_nested(self):
expr = """
a = 10
f'{a * f'-{x()}-'}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.JoinedStr)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the nested call location
self.assertEqual(len(binop.right.values), 3)
self.assertEqual(type(binop.right.values[0]), ast.Constant)
self.assertEqual(type(binop.right.values[0].value), str)
self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
self.assertEqual(type(binop.right.values[2]), ast.Constant)
self.assertEqual(type(binop.right.values[2].value), str)
self.assertEqual(binop.right.values[0].lineno, 3)
self.assertEqual(binop.right.values[1].lineno, 3)
self.assertEqual(binop.right.values[2].lineno, 3)
call = binop.right.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.col_offset, 11)
def test_ast_line_numbers_duplicate_expression(self):
"""Duplicate expression
NOTE: this is currently broken, always sets location of the first
expression.
"""
expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 5)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[1].value), str)
self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[3].value), str)
self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
self.assertEqual(t.body[1].value.values[4].lineno, 3)
# check the first binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the second binop location
binop = t.body[1].value.values[2].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
# check the third binop location
binop = t.body[1].value.values[4].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
def test_ast_line_numbers_multiline_fstring(self):
# See bpo-30465 for details.
expr = """
a = 10
f'''
{a
*
x()}
non-important content
'''
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 3)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].col_offset, 0)
self.assertEqual(t.body[1].value.col_offset, 0)
self.assertEqual(t.body[1].value.values[0].col_offset, 0)
self.assertEqual(t.body[1].value.values[1].col_offset, 0)
self.assertEqual(t.body[1].value.values[2].col_offset, 0)
# NOTE: the following lineno information and col_offset is correct for
# expressions within FormattedValues.
binop = t.body[1].value.values[1].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 4)
self.assertEqual(binop.left.lineno, 4)
self.assertEqual(binop.right.lineno, 6)
self.assertEqual(binop.col_offset, 4)
self.assertEqual(binop.left.col_offset, 4)
self.assertEqual(binop.right.col_offset, 7)
def test_docstring(self):
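# An f-string is not a string literal, so it is never picked up as a
# docstring, even when it contains no replacement fields.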
def f():
f'''Not a docstring'''
self.assertIsNone(f.__doc__)
def g():
'''Not a docstring''' \
f''
self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
x = ['']
expr = """x[0] = 'foo' f'{3}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
self.assertAllRaise(SyntaxError,
'cannot mix bytes and nonbytes literals',
[r"""f'' b''""",
r"""b'' f''""",
])
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
[r"""f'{"x'""",
r"""f'{"x}'""",
r"""f'{("x'""",
r"""f'{("x}'""",
])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{((}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
r"does not match opening parenthesis '\['",
["f'{a[4)}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
r"does not match opening parenthesis '\('",
["f'{a(4]}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\['",
["f'{a[4}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{a(4}'",
])
self.assertRaises(SyntaxError, eval, "f'{' + '('*500 + '}'")
def test_double_braces(self):
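# Doubled braces are escapes in the literal text: '{{' renders as '{'
# and '}}' renders as '}'.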
self.assertEqual(f'{{', '{')
self.assertEqual(f'a{{', 'a{')
self.assertEqual(f'{{b', '{b')
self.assertEqual(f'a{{b', 'a{b')
self.assertEqual(f'}}', '}')
self.assertEqual(f'a}}', 'a}')
self.assertEqual(f'}}b', '}b')
self.assertEqual(f'a}}b', 'a}b')
self.assertEqual(f'{{}}', '{}')
self.assertEqual(f'a{{}}', 'a{}')
self.assertEqual(f'{{b}}', '{b}')
self.assertEqual(f'{{}}c', '{}c')
self.assertEqual(f'a{{b}}', 'a{b}')
self.assertEqual(f'a{{}}c', 'a{}c')
self.assertEqual(f'{{b}}c', '{b}c')
self.assertEqual(f'a{{b}}c', 'a{b}c')
self.assertEqual(f'{{{10}', '{10')
self.assertEqual(f'}}{10}', '}10')
self.assertEqual(f'}}{{{10}', '}{10')
self.assertEqual(f'}}a{{{10}', '}a{10')
self.assertEqual(f'{10}{{', '10{')
self.assertEqual(f'{10}}}', '10}')
self.assertEqual(f'{10}}}{{', '10}{')
self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
# Inside of strings, don't interpret doubled brackets.
self.assertEqual(f'{'{{}}'}', '{{}}')
self.assertAllRaise(TypeError, 'unhashable type',
["f'{ {{}} }'", # dict in a set
])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{x' f'{x}', '{xdef')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{{x}}' f'{x}', '{{x}}def')
self.assertEqual('{{x' f'{x}', '{{xdef')
self.assertEqual('x}}' f'{x}', 'x}}def')
self.assertEqual(f'{x}' 'x}}', 'defx}}')
self.assertEqual(f'{x}' '', 'def')
self.assertEqual('' f'{x}' '', 'def')
self.assertEqual('' f'{x}', 'def')
self.assertEqual(f'{x}' '2', 'def2')
self.assertEqual('1' f'{x}' '2', '1def2')
self.assertEqual('1' f'{x}', '1def')
self.assertEqual(f'{x}' f'-{x}', 'def-def')
self.assertEqual('' f'', '')
self.assertEqual('' f'' '', '')
self.assertEqual('' f'' '' f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'' '', '')
self.assertEqual(f'' '' f'', '')
self.assertEqual(f'' '' f'' '', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3' f'}'", # can't concat to get a valid f-string
])
def test_comments(self):
# These aren't comments, since they're in strings.
d = {'#': 'hash'}
self.assertEqual(f'{'#'}', '#')
self.assertEqual(f'{d['#']}', 'hash')
self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
["f'{1#}'", # error because the expression becomes "(1#)"
"f'{3(#)}'",
"f'{#}'",
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{)#}'", # When wrapped in parens, this becomes
# '()#)'. Make sure that doesn't compile.
])
def test_many_expressions(self):
# Create a string with many expressions in it. Note that
# because we have a space in here as a literal, we're actually
# going to use twice as many ast nodes: one for each literal
# plus one for each expression.
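# For example, build_fstr(2) returns "f'{x} {x} '", which parses to a
# JoinedStr with four values: two FormattedValues and two Constant ' '
# literals.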
def build_fstr(n, extra=''):
return "f'" + ('{x} ' * n) + extra + "'"
x = 'X'
width = 1
# Test around 256.
for i in range(250, 260):
self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
# Test concatenating 2 large f-strings.
self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
s = build_fstr(253, '{x:{width}} ')
self.assertEqual(eval(s), (x+' ')*254)
# Test lots of expressions and constants, concatenated.
s = "f'{1}' 'x' 'y'" * 1024
self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result:      12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result:      12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result:      12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result:      12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result:      12.35')
self.assertEqual(f'{10:#{1}0x}', '       0xa')
self.assertEqual(f'{10:{'#'}1{0}{'x'}}', '       0xa')
self.assertEqual(f'{-10:-{'#'}1{0}x}', '      -0xa')
self.assertEqual(f'{-10:{'-'}#{1}0{'x'}}', '      -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', '       0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{'s'!r{':10'}}'""",
# This looks like a nested format spec.
])
self.assertAllRaise(SyntaxError, "f-string: invalid syntax",
[# Invalid syntax inside a nested spec.
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[# Can't nest format specifiers.
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{'s'!{'r'}}'""",
])
def test_side_effect_order(self):
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
x = X()
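# Each replacement field formats its value when it is reached, so the
# two fields below observe the counter in left-to-right order.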
self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
["f'{}'",
"f'{ }'"
"f' {} '",
"f'{!r}'",
"f'{ !r}'",
"f'{10:{ }}'",
"f' { } '",
# The Python parser also ignores the following
# whitespace characters in addition to a space.
"f'''{\t\f\r\n}'''",
# Catch the empty expression before the
# invalid conversion.
"f'{!x}'",
"f'{ !xr}'",
"f'{!x:}'",
"f'{!x:a}'",
"f'{ !xr:}'",
"f'{ !xr:a}'",
"f'{!}'",
"f'{:}'",
# We find the empty expression before the
# missing closing brace.
"f'{!'",
"f'{!s:'",
"f'{:'",
"f'{:x'",
])
# A different error message is raised for other whitespace characters.
self.assertAllRaise(SyntaxError, r"invalid non-printable character U\+00A0",
["f'''{\xa0}'''",
"\xa0",
])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
# expression to a valid one. The added parens are just
# supposed to allow whitespace (including newlines).
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
["f'{,}'",
"f'{,}'", # this is (,), which is an error
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
["f'{\n}'",
])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual(r'\t', '\\t')
self.assertEqual(rf'\t', '\\t')
self.assertEqual(f'{2}\t', '2\t')
self.assertEqual(f'{2}\t{3}', '2\t3')
self.assertEqual(f'\t{3}', '\t3')
self.assertEqual(f'\u0394', '\u0394')
self.assertEqual(r'\u0394', '\\u0394')
self.assertEqual(rf'\u0394', '\\u0394')
self.assertEqual(f'{2}\u0394', '2\u0394')
self.assertEqual(f'{2}\u0394{3}', '2\u03943')
self.assertEqual(f'\u0394{3}', '\u03943')
self.assertEqual(f'\U00000394', '\u0394')
self.assertEqual(r'\U00000394', '\\U00000394')
self.assertEqual(rf'\U00000394', '\\U00000394')
self.assertEqual(f'{2}\U00000394', '2\u0394')
self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
self.assertEqual(f'\U00000394{3}', '\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
self.assertEqual(f'\x20', ' ')
self.assertEqual(r'\x20', '\\x20')
self.assertEqual(rf'\x20', '\\x20')
self.assertEqual(f'{2}\x20', '2 ')
self.assertEqual(f'{2}\x20{3}', '2 3')
self.assertEqual(f'\x20{3}', ' 3')
self.assertEqual(f'2\x20', '2 ')
self.assertEqual(f'2\x203', '2 3')
self.assertEqual(f'\x203', ' 3')
with self.assertWarns(DeprecationWarning): # invalid escape sequence
value = eval(r"f'\{6*7}'")
self.assertEqual(value, '\\42')
self.assertEqual(f'\\{6*7}', '\\42')
self.assertEqual(fr'\{6*7}', '\\42')
AMPERSAND = 'spam'
# Get the right unicode character (&), or pick up local variable
# depending on the number of backslashes.
self.assertEqual(f'\N{AMPERSAND}', '&')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
def test_misformed_unicode_character_name(self):
# These tests are needed because unicode names are parsed
# differently inside f-strings.
self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
[r"f'\N'",
r"f'\N{'",
r"f'\N{GREEK CAPITAL LETTER DELTA'",
# Here are the non-f-string versions,
# which should give the same errors.
r"'\N'",
r"'\N{'",
r"'\N{GREEK CAPITAL LETTER DELTA'",
])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
[r"f'{\'a\'}'",
r"f'{\t3}'",
r"f'{\}'",
r"rf'{\'a\'}'",
r"rf'{\t3}'",
r"rf'{\}'",
r"""rf'{'\N{LEFT CURLY BRACKET}'}'""",
r"f'{\n}'",
])
def test_no_escapes_for_braces(self):
"""
Only literal curly braces begin an expression.
"""
# \x7b is '{'.
self.assertEqual(f'\x7b1+1}}', '{1+1}')
self.assertEqual(f'\x7b1+1', '{1+1')
self.assertEqual(f'\u007b1+1', '{1+1')
self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{0}', '0')
self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)('8')!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)('8')!r:10}', "'88888'   ")
self.assertEqual(f'{(lambda y:x*y)('8'):10}', '88888     ')
# lambda doesn't work without parens, because the colon
# makes the parser think it's a format_spec
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
["f'{lambda x:x}'",
])
def test_yield(self):
# Not terribly useful, but make sure the yield turns
# a function into a generator
def fn(y):
f'y:{yield y*2}'
f'{yield}'
g = fn(4)
self.assertEqual(next(g), 8)
self.assertEqual(next(g), None)
def test_yield_send(self):
def fn(x):
yield f'x:{yield (lambda i: x * i)}'
g = fn(10)
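# The first next() runs fn up to the inner yield, which hands back the
# lambda; send() then becomes the value of that inner yield, so the
# outer yield produces 'x:string'.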
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{"""x"""}", 'x')
self.assertEqual(f"{"""eric"s"""}", "eric's")
# Test concatenation within an expression
self.assertEqual(f'{'x' '''eric's''' 'y'}', 'xeric"sy')
self.assertEqual(f'{'x' '''eric's'''}', 'xeric"s')
self.assertEqual(f'{'''eric's''' 'y'}', 'eric"sy')
self.assertEqual(f'{'''x''' '''eric's''' 'y'}', 'xeric"sy')
self.assertEqual(f'{'''x''' '''eric's''' '''y'''}', 'xeric"sy')
self.assertEqual(f'{r'''x''' '''eric's''' '''y'''}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={x*y:{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo    ')
x = 'bar'
self.assertEqual(f(10, 10), 'x=        20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f'v:{value}'
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{3:}', '3')
self.assertEqual(f'{3!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
def test_nested_fstrings(self):
y = 5
self.assertEqual(f'{f'{0}'*3}', '000')
self.assertEqual(f'{f'{y}'*3}', '555')
def test_invalid_string_prefixes(self):
single_quote_cases = ["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",]
double_quote_cases = [case.replace("'", '"') for case in single_quote_cases]
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
single_quote_cases + double_quote_cases)
def test_leading_trailing_spaces(self):
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{  3}', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'{3  }', '3')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
'expr={1: 2}')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
'expr={1: 2}')
def test_not_equal(self):
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
self.assertEqual(f'{3!=4}', 'True')
self.assertEqual(f'{3!=4:}', 'True')
self.assertEqual(f'{3!=4!s}', 'True')
self.assertEqual(f'{3!=4!s:.3}', 'Tru')
def test_equal_equal(self):
# Because an expression ending in = has special meaning,
# there's a special test for ==. Make sure it works.
self.assertEqual(f'{0==1}', 'False')
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', '      3.14')
self.assertEqual(f'{3.14!s:10.10}', '3.14      ')
self.assertEqual(f'{3.14!r:10.10}', '3.14      ')
self.assertEqual(f'{3.14!a:10.10}', '3.14      ')
self.assertEqual(f'{'a'}', 'a')
self.assertEqual(f'{'a'!r}', "'a'")
self.assertEqual(f'{'a'!a}', "'a'")
# Not a conversion.
self.assertEqual(f'{'a!r'}', "a!r")
# Not a conversion, but show that ! is allowed in a format spec.
self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
["f'{3!g}'",
"f'{3!A}'",
"f'{3!3}'",
"f'{3!G}'",
"f'{3!!}'",
"f'{3!:}'",
"f'{3! s}'", # no space before conversion char
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{x!s{y}}'",
"f'{3!ss}'",
"f'{3!ss:}'",
"f'{3!ss:s}'",
])
def test_assignment(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'' = 3",
"f'{0}' = x",
"f'{x}' = x",
])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["del f''",
"del '' f''",
])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
["f'{{}'",
"f'{{}}}'",
"f'}'",
"f'x}'",
"f'x}x'",
r"f'\u007b}'",
# Can't have { or } in a format spec.
"f'{3:}>10}'",
"f'{3:}}>10}'",
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3:{{>10}'",
"f'{3'",
"f'{3!'",
"f'{3:'",
"f'{3!s'",
"f'{3!s:'",
"f'{3!s:3'",
"f'x{'",
"f'x{x'",
"f'{x'",
"f'{3:s'",
"f'{{{'",
"f'{{}}{'",
"f'{'",
])
# But these are just normal strings.
self.assertEqual(f'{'{'}', '{')
self.assertEqual(f'{'}'}', '}')
self.assertEqual(f'{3:{'}'}>10}', '}}}}}}}}}3')
self.assertEqual(f'{2:{'{'}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
# There's special logic in compile.c to test if the
# conditional for an if (and while) are constants. Exercise
# that code.
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if '' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if ' ' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string',
0: 'integer',
}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f'{d['a']}', 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_errors(self):
# see issue 26287
self.assertAllRaise(TypeError, 'unsupported',
[r"f'{(lambda: 0):x}'",
r"f'{(0,):x}'",
])
self.assertAllRaise(ValueError, 'Unknown format code',
[r"f'{1000:j}'",
r"f'{1000:j}'",
])
def test_filename_in_syntaxerror(self):
# see issue 38964
with temp_cwd() as cwd:
file_path = os.path.join(cwd, 't.py')
with open(file_path, 'w') as f:
f.write('f"{a b}"') # This generates a SyntaxError
_, _, stderr = assert_python_failure(file_path,
PYTHONIOENCODING='ascii')
self.assertIn(file_path.encode('ascii', 'backslashreplace'), stderr)
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote',
"'": 'squote',
'foo': 'bar',
}
self.assertEqual(f'''{d["'"]}''', 'squote')
self.assertEqual(f"""{d['"']}""", 'dquote')
self.assertEqual(f'{d['foo']}', 'bar')
self.assertEqual(f"{d["foo"]}", 'bar')
def test_backslash_char(self):
# Check eval of a backslash followed by a control char.
# See bpo-30682: this used to raise an assert in pydebug mode.
self.assertEqual(eval('f"\\\n"'), '')
self.assertEqual(eval('f"\\\r"'), '')
def test_debug_conversion(self):
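# f'{expr=}' expands to the source text of expr, the '=', and then the
# value, using repr() unless an explicit conversion or format spec is
# given.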
x = 'A string'
self.assertEqual(f'{x=}', 'x=' + repr(x))
self.assertEqual(f'{x =}', 'x =' + repr(x))
self.assertEqual(f'{x=!s}', 'x=' + str(x))
self.assertEqual(f'{x=!r}', 'x=' + repr(x))
self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
x = 2.71828
self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
x = 9
self.assertEqual(f'{3*x+15=}', '3*x+15=42')
# There is code in ast.c that deals with non-ascii expression values. So,
# use a unicode identifier to trigger that.
tenπ = 31.4
self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
# Also test with Unicode in non-identifiers.
self.assertEqual(f'{'Σ'=}', "'Σ'='Σ'")
# Make sure nested fstrings still work.
self.assertEqual(f'{f'{3.1415=:.1f}':*^20}', '*****3.1415=3.1*****')
# Make sure text before and after an expression with = works
# correctly.
pi = 'π'
self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")
# Check multi-line expressions.
self.assertEqual(f'''{
3
=}''', '\n3\n=3')
# Since = is handled specially, make sure all existing uses of
# it still work.
self.assertEqual(f'{0==1}', 'False')
self.assertEqual(f'{0!=1}', 'True')
self.assertEqual(f'{0<=1}', 'True')
self.assertEqual(f'{0>=1}', 'False')
self.assertEqual(f'{(x:='5')}', '5')
self.assertEqual(x, '5')
self.assertEqual(f'{(x:=5)}', '5')
self.assertEqual(x, 5)
self.assertEqual(f'{'='}', '=')
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'. See test_walrus: you need to use parens.
self.assertEqual(f'{x:=10}', '        20')
# Test named function parameters, to make sure '=' parsing works
# there.
def f(a):
nonlocal x
oldx = x
x = a
return oldx
x = 0
self.assertEqual(f'{f(a='3=')}', '0')
self.assertEqual(x, '3=')
self.assertEqual(f'{f(a=4)}', '3=')
self.assertEqual(x, 4)
# Make sure __format__ is being called.
class C:
def __format__(self, s):
return f'FORMAT-{s}'
def __repr__(self):
return 'REPR'
self.assertEqual(f'{C()=}', 'C()=REPR')
self.assertEqual(f'{C()=!r}', 'C()=REPR')
self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')
self.assertRaises(SyntaxError, eval, "f'{C=]'")
# Make sure leading and following text works.
x = 'foo'
self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')
# Make sure whitespace around the = works.
self.assertEqual(f'X{x =}Y', 'Xx ='+repr(x)+'Y')
self.assertEqual(f'X{x= }Y', 'Xx= '+repr(x)+'Y')
self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
# These next lines contain tabs. Backslash escapes don't
# work in f-strings.
# patchcheck doesn't like these tabs. So the only way to test
# this will be to dynamically create and exec the f-strings. But
# that's such a hassle I'll save it for another day. For now, convert
# the tabs to spaces just to shut up patchcheck.
#self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
#self.assertEqual(f'X{x = }Y', 'Xx\t=\t'+repr(x)+'Y')
def test_walrus(self):
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'.
self.assertEqual(f'{x:=10}', '        20')
# This is an assignment expression, which requires parens.
self.assertEqual(f'{(x:=10)}', '10')
self.assertEqual(x, 10)
def test_invalid_syntax_error_message(self):
with self.assertRaisesRegex(SyntaxError, "f-string: invalid syntax"):
compile("f'{a $ b}'", "?", "exec")
def test_with_two_commas_in_format_specifier(self):
error_msg = re.escape("Cannot specify ',' with ','.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,,}'
def test_with_two_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify '_' with '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:__}'
def test_with_a_commas_and_an_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,_}'
def test_with_an_underscore_and_a_comma_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:_,}'
if __name__ == '__main__':
unittest.main()
| # -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests are allowed by PEP 3131.
import ast
import os
import re
import types
import decimal
import unittest
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
def test__format__lookup(self):
# Make sure __format__ is looked up on the type, not the instance.
class X:
def __format__(self, spec):
return 'class'
x = X()
# Add a bound __format__ method to the 'y' instance, but not
# the 'x' instance.
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
# __format__ is not called this way, but still make sure it
# returns what we expect (so we can make sure we're bypassing
# it).
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
# This is how __format__ is actually called.
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
# Inspired by http://bugs.python.org/issue24975
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
# Make sure x was not called.
self.assertFalse(x.called)
# Actually run the code.
exec(c)
# Make sure x was called.
self.assertTrue(x.called)
def test_ast_line_numbers(self):
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiple_formattedvalues(self):
expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `f'no formatted values'`
self.assertEqual(type(t.body[0]), ast.Expr)
self.assertEqual(type(t.body[0].value), ast.JoinedStr)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 4)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
# check the first binop location
binop1 = t.body[1].value.values[1].value
self.assertEqual(type(binop1), ast.BinOp)
self.assertEqual(type(binop1.left), ast.Name)
self.assertEqual(type(binop1.op), ast.Mult)
self.assertEqual(type(binop1.right), ast.Call)
self.assertEqual(binop1.lineno, 3)
self.assertEqual(binop1.left.lineno, 3)
self.assertEqual(binop1.right.lineno, 3)
self.assertEqual(binop1.col_offset, 8)
self.assertEqual(binop1.left.col_offset, 8)
self.assertEqual(binop1.right.col_offset, 12)
# check the second binop location
binop2 = t.body[1].value.values[3].value
self.assertEqual(type(binop2), ast.BinOp)
self.assertEqual(type(binop2.left), ast.Name)
self.assertEqual(type(binop2.op), ast.Add)
self.assertEqual(type(binop2.right), ast.Call)
self.assertEqual(binop2.lineno, 3)
self.assertEqual(binop2.left.lineno, 3)
self.assertEqual(binop2.right.lineno, 3)
self.assertEqual(binop2.col_offset, 23)
self.assertEqual(binop2.left.col_offset, 23)
self.assertEqual(binop2.right.col_offset, 27)
def test_ast_line_numbers_nested(self):
expr = """
a = 10
f'{a * f"-{x()}-"}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.JoinedStr)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the nested call location
self.assertEqual(len(binop.right.values), 3)
self.assertEqual(type(binop.right.values[0]), ast.Constant)
self.assertEqual(type(binop.right.values[0].value), str)
self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
self.assertEqual(type(binop.right.values[2]), ast.Constant)
self.assertEqual(type(binop.right.values[2].value), str)
self.assertEqual(binop.right.values[0].lineno, 3)
self.assertEqual(binop.right.values[1].lineno, 3)
self.assertEqual(binop.right.values[2].lineno, 3)
call = binop.right.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.col_offset, 11)
def test_ast_line_numbers_duplicate_expression(self):
"""Duplicate expression
NOTE: this is currently broken, always sets location of the first
expression.
"""
expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 5)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[1].value), str)
self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[3].value), str)
self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
self.assertEqual(t.body[1].value.values[4].lineno, 3)
# check the first binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the second binop location
binop = t.body[1].value.values[2].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
# check the third binop location
binop = t.body[1].value.values[4].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
def test_ast_line_numbers_multiline_fstring(self):
# See bpo-30465 for details.
expr = """
a = 10
f'''
{a
*
x()}
non-important content
'''
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 3)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].col_offset, 0)
self.assertEqual(t.body[1].value.col_offset, 0)
self.assertEqual(t.body[1].value.values[0].col_offset, 0)
self.assertEqual(t.body[1].value.values[1].col_offset, 0)
self.assertEqual(t.body[1].value.values[2].col_offset, 0)
# NOTE: the following lineno information and col_offset is correct for
# expressions within FormattedValues.
binop = t.body[1].value.values[1].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 4)
self.assertEqual(binop.left.lineno, 4)
self.assertEqual(binop.right.lineno, 6)
self.assertEqual(binop.col_offset, 4)
self.assertEqual(binop.left.col_offset, 4)
self.assertEqual(binop.right.col_offset, 7)
def test_docstring(self):
def f():
f'''Not a docstring'''
self.assertIsNone(f.__doc__)
def g():
'''Not a docstring''' \
f''
self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
x = ['']
expr = """x[0] = 'foo' f'{3}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
self.assertAllRaise(SyntaxError,
'cannot mix bytes and nonbytes literals',
[r"""f'' b''""",
r"""b'' f''""",
])
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
[r"""f'{"x'""",
r"""f'{"x}'""",
r"""f'{("x'""",
r"""f'{("x}'""",
])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{((}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
r"does not match opening parenthesis '\['",
["f'{a[4)}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
r"does not match opening parenthesis '\('",
["f'{a(4]}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\['",
["f'{a[4}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{a(4}'",
])
self.assertRaises(SyntaxError, eval, "f'{" + "("*500 + "}'")
def test_double_braces(self):
self.assertEqual(f'{{', '{')
self.assertEqual(f'a{{', 'a{')
self.assertEqual(f'{{b', '{b')
self.assertEqual(f'a{{b', 'a{b')
self.assertEqual(f'}}', '}')
self.assertEqual(f'a}}', 'a}')
self.assertEqual(f'}}b', '}b')
self.assertEqual(f'a}}b', 'a}b')
self.assertEqual(f'{{}}', '{}')
self.assertEqual(f'a{{}}', 'a{}')
self.assertEqual(f'{{b}}', '{b}')
self.assertEqual(f'{{}}c', '{}c')
self.assertEqual(f'a{{b}}', 'a{b}')
self.assertEqual(f'a{{}}c', 'a{}c')
self.assertEqual(f'{{b}}c', '{b}c')
self.assertEqual(f'a{{b}}c', 'a{b}c')
self.assertEqual(f'{{{10}', '{10')
self.assertEqual(f'}}{10}', '}10')
self.assertEqual(f'}}{{{10}', '}{10')
self.assertEqual(f'}}a{{{10}', '}a{10')
self.assertEqual(f'{10}{{', '10{')
self.assertEqual(f'{10}}}', '10}')
self.assertEqual(f'{10}}}{{', '10}{')
self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
# Inside of strings, don't interpret doubled brackets.
self.assertEqual(f'{"{{}}"}', '{{}}')
self.assertAllRaise(TypeError, 'unhashable type',
["f'{ {{}} }'", # dict in a set
])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{x' f'{x}', '{xdef')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{{x}}' f'{x}', '{{x}}def')
self.assertEqual('{{x' f'{x}', '{{xdef')
self.assertEqual('x}}' f'{x}', 'x}}def')
self.assertEqual(f'{x}' 'x}}', 'defx}}')
self.assertEqual(f'{x}' '', 'def')
self.assertEqual('' f'{x}' '', 'def')
self.assertEqual('' f'{x}', 'def')
self.assertEqual(f'{x}' '2', 'def2')
self.assertEqual('1' f'{x}' '2', '1def2')
self.assertEqual('1' f'{x}', '1def')
self.assertEqual(f'{x}' f'-{x}', 'def-def')
self.assertEqual('' f'', '')
self.assertEqual('' f'' '', '')
self.assertEqual('' f'' '' f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'' '', '')
self.assertEqual(f'' '' f'', '')
self.assertEqual(f'' '' f'' '', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3' f'}'", # can't concat to get a valid f-string
])
def test_comments(self):
# These aren't comments, since they're in strings.
d = {'#': 'hash'}
self.assertEqual(f'{"#"}', '#')
self.assertEqual(f'{d["#"]}', 'hash')
self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
["f'{1#}'", # error because the expression becomes "(1#)"
"f'{3(#)}'",
"f'{#}'",
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{)#}'", # When wrapped in parens, this becomes
# '()#)'. Make sure that doesn't compile.
])
def test_many_expressions(self):
# Create a string with many expressions in it. Note that
# because we have a space in here as a literal, we're actually
# going to use twice as many ast nodes: one for each literal
# plus one for each expression.
def build_fstr(n, extra=''):
return "f'" + ('{x} ' * n) + extra + "'"
x = 'X'
width = 1
# Test around 256.
for i in range(250, 260):
self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
# Test concatenating 2 large f-strings.
self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
s = build_fstr(253, '{x:{width}} ')
self.assertEqual(eval(s), (x+' ')*254)
# Test lots of expressions and constants, concatenated.
s = "f'{1}' 'x' 'y'" * 1024
self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result:      12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result:      12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result:      12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result:      12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result:      12.35')
self.assertEqual(f'{10:#{1}0x}', '       0xa')
self.assertEqual(f'{10:{"#"}1{0}{"x"}}', '       0xa')
self.assertEqual(f'{-10:-{"#"}1{0}x}', '      -0xa')
self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', '      -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', '       0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{"s"!r{":10"}}'""",
# This looks like a nested format spec.
])
self.assertAllRaise(SyntaxError, "f-string: invalid syntax",
[# Invalid syntax inside a nested spec.
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[# Can't nest format specifiers.
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{"s"!{"r"}}'""",
])
def test_side_effect_order(self):
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
x = X()
self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
["f'{}'",
"f'{ }'"
"f' {} '",
"f'{!r}'",
"f'{ !r}'",
"f'{10:{ }}'",
"f' { } '",
# The Python parser also ignores the following
# whitespace characters in addition to a space.
"f'''{\t\f\r\n}'''",
# Catch the empty expression before the
# invalid conversion.
"f'{!x}'",
"f'{ !xr}'",
"f'{!x:}'",
"f'{!x:a}'",
"f'{ !xr:}'",
"f'{ !xr:a}'",
"f'{!}'",
"f'{:}'",
# We find the empty expression before the
# missing closing brace.
"f'{!'",
"f'{!s:'",
"f'{:'",
"f'{:x'",
])
# A different error message is raised for other whitespace characters.
self.assertAllRaise(SyntaxError, r"invalid non-printable character U\+00A0",
["f'''{\xa0}'''",
"\xa0",
])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
# expression to a valid one. The added parens are just
# supposed to allow whitespace (including newlines).
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
["f'{,}'",
"f'{,}'", # this is (,), which is an error
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
["f'{\n}'",
])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual(r'\t', '\\t')
self.assertEqual(rf'\t', '\\t')
self.assertEqual(f'{2}\t', '2\t')
self.assertEqual(f'{2}\t{3}', '2\t3')
self.assertEqual(f'\t{3}', '\t3')
self.assertEqual(f'\u0394', '\u0394')
self.assertEqual(r'\u0394', '\\u0394')
self.assertEqual(rf'\u0394', '\\u0394')
self.assertEqual(f'{2}\u0394', '2\u0394')
self.assertEqual(f'{2}\u0394{3}', '2\u03943')
self.assertEqual(f'\u0394{3}', '\u03943')
self.assertEqual(f'\U00000394', '\u0394')
self.assertEqual(r'\U00000394', '\\U00000394')
self.assertEqual(rf'\U00000394', '\\U00000394')
self.assertEqual(f'{2}\U00000394', '2\u0394')
self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
self.assertEqual(f'\U00000394{3}', '\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
self.assertEqual(f'\x20', ' ')
self.assertEqual(r'\x20', '\\x20')
self.assertEqual(rf'\x20', '\\x20')
self.assertEqual(f'{2}\x20', '2 ')
self.assertEqual(f'{2}\x20{3}', '2 3')
self.assertEqual(f'\x20{3}', ' 3')
self.assertEqual(f'2\x20', '2 ')
self.assertEqual(f'2\x203', '2 3')
self.assertEqual(f'\x203', ' 3')
with self.assertWarns(DeprecationWarning): # invalid escape sequence
value = eval(r"f'\{6*7}'")
self.assertEqual(value, '\\42')
self.assertEqual(f'\\{6*7}', '\\42')
self.assertEqual(fr'\{6*7}', '\\42')
AMPERSAND = 'spam'
# Get the right unicode character (&), or pick up local variable
# depending on the number of backslashes.
self.assertEqual(f'\N{AMPERSAND}', '&')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
def test_misformed_unicode_character_name(self):
        # These tests are needed because unicode names are parsed
# differently inside f-strings.
self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
[r"f'\N'",
r"f'\N{'",
r"f'\N{GREEK CAPITAL LETTER DELTA'",
# Here are the non-f-string versions,
# which should give the same errors.
r"'\N'",
r"'\N{'",
r"'\N{GREEK CAPITAL LETTER DELTA'",
])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
[r"f'{\'a\'}'",
r"f'{\t3}'",
r"f'{\}'",
r"rf'{\'a\'}'",
r"rf'{\t3}'",
r"rf'{\}'",
r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
r"f'{\n}'",
])
def test_no_escapes_for_braces(self):
"""
Only literal curly braces begin an expression.
"""
# \x7b is '{'.
self.assertEqual(f'\x7b1+1}}', '{1+1}')
self.assertEqual(f'\x7b1+1', '{1+1')
self.assertEqual(f'\u007b1+1', '{1+1')
self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{0}', '0')
self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888' ")
self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888 ")
# lambda doesn't work without parens, because the colon
# makes the parser think it's a format_spec
        err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
        self.assertAllRaise(SyntaxError, err_msg,
["f'{lambda x:x}'",
])
def test_yield(self):
# Not terribly useful, but make sure the yield turns
# a function into a generator
def fn(y):
f'y:{yield y*2}'
f'{yield}'
g = fn(4)
self.assertEqual(next(g), 8)
self.assertEqual(next(g), None)
def test_yield_send(self):
def fn(x):
yield f'x:{yield (lambda i: x * i)}'
g = fn(10)
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{'''x'''}", 'x')
self.assertEqual(f"{'''eric's'''}", "eric's")
# Test concatenation within an expression
self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={x*y:{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo ')
x = 'bar'
self.assertEqual(f(10, 10), 'x= 20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f'v:{value}'
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{3:}', '3')
self.assertEqual(f'{3!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
def test_nested_fstrings(self):
y = 5
self.assertEqual(f'{f"{0}"*3}', '000')
self.assertEqual(f'{f"{y}"*3}', '555')
def test_invalid_string_prefixes(self):
single_quote_cases = ["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",]
double_quote_cases = [case.replace("'", '"') for case in single_quote_cases]
error_msg = (
'invalid syntax'
if use_old_parser()
else 'unexpected EOF while parsing'
)
self.assertAllRaise(SyntaxError, error_msg,
single_quote_cases + double_quote_cases)
def test_leading_trailing_spaces(self):
        self.assertEqual(f'{ 3}', '3')
        self.assertEqual(f'{  3}', '3')
        self.assertEqual(f'{3 }', '3')
        self.assertEqual(f'{3  }', '3')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
'expr={1: 2}')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
'expr={1: 2}')
def test_not_equal(self):
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
self.assertEqual(f'{3!=4}', 'True')
self.assertEqual(f'{3!=4:}', 'True')
self.assertEqual(f'{3!=4!s}', 'True')
self.assertEqual(f'{3!=4!s:.3}', 'Tru')
def test_equal_equal(self):
# Because an expression ending in = has special meaning,
# there's a special test for ==. Make sure it works.
self.assertEqual(f'{0==1}', 'False')
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
self.assertEqual(f'{"a"}', 'a')
self.assertEqual(f'{"a"!r}', "'a'")
self.assertEqual(f'{"a"!a}', "'a'")
# Not a conversion.
self.assertEqual(f'{"a!r"}', "a!r")
# Not a conversion, but show that ! is allowed in a format spec.
self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
["f'{3!g}'",
"f'{3!A}'",
"f'{3!3}'",
"f'{3!G}'",
"f'{3!!}'",
"f'{3!:}'",
"f'{3! s}'", # no space before conversion char
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{x!s{y}}'",
"f'{3!ss}'",
"f'{3!ss:}'",
"f'{3!ss:s}'",
])
def test_assignment(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'' = 3",
"f'{0}' = x",
"f'{x}' = x",
])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["del f''",
"del '' f''",
])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
["f'{{}'",
"f'{{}}}'",
"f'}'",
"f'x}'",
"f'x}x'",
r"f'\u007b}'",
# Can't have { or } in a format spec.
"f'{3:}>10}'",
"f'{3:}}>10}'",
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3:{{>10}'",
"f'{3'",
"f'{3!'",
"f'{3:'",
"f'{3!s'",
"f'{3!s:'",
"f'{3!s:3'",
"f'x{'",
"f'x{x'",
"f'{x'",
"f'{3:s'",
"f'{{{'",
"f'{{}}{'",
"f'{'",
])
# But these are just normal strings.
self.assertEqual(f'{"{"}', '{')
self.assertEqual(f'{"}"}', '}')
self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
        # There's special logic in compile.c to test whether the
        # conditional for an if (and while) is a constant. Exercise
        # that code.
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if '' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if ' ' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string',
0: 'integer',
}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f'{d["a"]}', 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_errors(self):
# see issue 26287
self.assertAllRaise(TypeError, 'unsupported',
[r"f'{(lambda: 0):x}'",
r"f'{(0,):x}'",
])
self.assertAllRaise(ValueError, 'Unknown format code',
[r"f'{1000:j}'",
r"f'{1000:j}'",
])
    @unittest.skipIf(use_old_parser(), "The old parser only supports <fstring> as the filename")
def test_filename_in_syntaxerror(self):
# see issue 38964
with temp_cwd() as cwd:
file_path = os.path.join(cwd, 't.py')
with open(file_path, 'w') as f:
f.write('f"{a b}"') # This generates a SyntaxError
_, _, stderr = assert_python_failure(file_path,
PYTHONIOENCODING='ascii')
self.assertIn(file_path.encode('ascii', 'backslashreplace'), stderr)
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote',
"'": 'squote',
'foo': 'bar',
}
self.assertEqual(f'''{d["'"]}''', 'squote')
self.assertEqual(f"""{d['"']}""", 'dquote')
self.assertEqual(f'{d["foo"]}', 'bar')
self.assertEqual(f"{d['foo']}", 'bar')
def test_backslash_char(self):
# Check eval of a backslash followed by a control char.
# See bpo-30682: this used to raise an assert in pydebug mode.
self.assertEqual(eval('f"\\\n"'), '')
self.assertEqual(eval('f"\\\r"'), '')
def test_debug_conversion(self):
x = 'A string'
self.assertEqual(f'{x=}', 'x=' + repr(x))
self.assertEqual(f'{x =}', 'x =' + repr(x))
self.assertEqual(f'{x=!s}', 'x=' + str(x))
self.assertEqual(f'{x=!r}', 'x=' + repr(x))
self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
x = 2.71828
self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
x = 9
self.assertEqual(f'{3*x+15=}', '3*x+15=42')
# There is code in ast.c that deals with non-ascii expression values. So,
# use a unicode identifier to trigger that.
tenπ = 31.4
self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
# Also test with Unicode in non-identifiers.
self.assertEqual(f'{"Σ"=}', '"Σ"=\'Σ\'')
# Make sure nested fstrings still work.
self.assertEqual(f'{f"{3.1415=:.1f}":*^20}', '*****3.1415=3.1*****')
# Make sure text before and after an expression with = works
# correctly.
pi = 'π'
self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")
# Check multi-line expressions.
self.assertEqual(f'''{
3
=}''', '\n3\n=3')
# Since = is handled specially, make sure all existing uses of
# it still work.
self.assertEqual(f'{0==1}', 'False')
self.assertEqual(f'{0!=1}', 'True')
self.assertEqual(f'{0<=1}', 'True')
self.assertEqual(f'{0>=1}', 'False')
self.assertEqual(f'{(x:="5")}', '5')
self.assertEqual(x, '5')
self.assertEqual(f'{(x:=5)}', '5')
self.assertEqual(x, 5)
self.assertEqual(f'{"="}', '=')
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'. See test_walrus: you need to use parens.
self.assertEqual(f'{x:=10}', ' 20')
# Test named function parameters, to make sure '=' parsing works
# there.
def f(a):
nonlocal x
oldx = x
x = a
return oldx
x = 0
self.assertEqual(f'{f(a="3=")}', '0')
self.assertEqual(x, '3=')
self.assertEqual(f'{f(a=4)}', '3=')
self.assertEqual(x, 4)
# Make sure __format__ is being called.
class C:
def __format__(self, s):
return f'FORMAT-{s}'
def __repr__(self):
return 'REPR'
self.assertEqual(f'{C()=}', 'C()=REPR')
self.assertEqual(f'{C()=!r}', 'C()=REPR')
self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')
self.assertRaises(SyntaxError, eval, "f'{C=]'")
# Make sure leading and following text works.
x = 'foo'
self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')
# Make sure whitespace around the = works.
self.assertEqual(f'X{x =}Y', 'Xx ='+repr(x)+'Y')
self.assertEqual(f'X{x= }Y', 'Xx= '+repr(x)+'Y')
self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
        # These next lines contain tabs. Backslash escapes don't
        # work in f-strings.
        # patchcheck doesn't like these tabs. So the only way to test
        # this will be to dynamically create and exec the f-strings. But
        # that's such a hassle I'll save it for another day. For now, convert
        # the tabs to spaces just to shut up patchcheck.
#self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
#self.assertEqual(f'X{x = }Y', 'Xx\t=\t'+repr(x)+'Y')
def test_walrus(self):
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'.
self.assertEqual(f'{x:=10}', ' 20')
# This is an assignment expression, which requires parens.
self.assertEqual(f'{(x:=10)}', '10')
self.assertEqual(x, 10)
def test_invalid_syntax_error_message(self):
        err_msg = "invalid syntax" if use_old_parser() else "f-string: invalid syntax"
        with self.assertRaisesRegex(SyntaxError, err_msg):
compile("f'{a $ b}'", "?", "exec")
def test_with_two_commas_in_format_specifier(self):
error_msg = re.escape("Cannot specify ',' with ','.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,,}'
def test_with_two_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify '_' with '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:__}'
def test_with_a_commas_and_an_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,_}'
def test_with_an_underscore_and_a_comma_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:_,}'
if __name__ == '__main__':
unittest.main()
|
import json
import logging
from pathlib import Path
import re
import numpy as np
import mtscomp
from brainbox.core import Bunch
from ibllib.ephys import neuropixel as neuropixel
from ibllib.io import hashfile
SAMPLE_SIZE = 2 # int16
DEFAULT_BATCH_SIZE = 1e6
_logger = logging.getLogger('ibllib')
class Reader:
"""
Class for SpikeGLX reading purposes
Some format description was found looking at the Matlab SDK here
https://github.com/billkarsh/SpikeGLX/blob/master/MATLAB-SDK/DemoReadSGLXData.m
"""
def __init__(self, sglx_file):
self.file_bin = Path(sglx_file)
self.nbytes = self.file_bin.stat().st_size
file_meta_data = Path(sglx_file).with_suffix('.meta')
if not file_meta_data.exists():
self.file_meta_data = None
self.meta = None
self.channel_conversion_sample2v = 1
_logger.warning(str(sglx_file) + " : no metadata file found. Very limited support")
return
# normal case we continue reading and interpreting the metadata file
self.file_meta_data = file_meta_data
self.meta = read_meta_data(file_meta_data)
self.channel_conversion_sample2v = _conversion_sample2v_from_meta(self.meta)
# if we are not looking at a compressed file, use a memmap, otherwise instantiate mtscomp
if self.is_mtscomp:
self._raw = mtscomp.Reader()
self._raw.open(self.file_bin, self.file_bin.with_suffix('.ch'))
else:
if self.nc * self.ns * 2 != self.nbytes:
ftsec = self.file_bin.stat().st_size / 2 / self.nc / self.fs
_logger.warning(f"{sglx_file} : meta data and filesize do not checkout\n"
f"File size: expected {self.meta["fileSizeBytes"]},"
f" actual {self.file_bin.stat().st_size}\n"
f"File duration: expected {self.meta["fileTimeSecs"]},"
f" actual {ftsec}\n"
f"Will attempt to fudge the meta-data information.")
self.meta['fileTimeSecs'] = ftsec
self._raw = np.memmap(sglx_file, dtype='int16', mode='r', shape=(self.ns, self.nc))
def __getitem__(self, item):
if isinstance(item, int) or isinstance(item, slice):
return self.read(nsel=item, sync=False)
elif len(item) == 2:
return self.read(nsel=item[0], csel=item[1], sync=False)
@property
def shape(self):
return self.ns, self.nc
@property
def is_mtscomp(self):
return 'cbin' in self.file_bin.suffix
@property
def version(self):
""":return: """
if not self.meta:
return None
return _get_neuropixel_version_from_meta(self.meta)
@property
def type(self):
""":return: ap, lf or nidq. Useful to index dictionaries """
if not self.meta:
return 0
return _get_type_from_meta(self.meta)
@property
def fs(self):
""" :return: sampling frequency (Hz) """
if not self.meta:
return 1
return _get_fs_from_meta(self.meta)
@property
def nc(self):
""" :return: number of channels """
if not self.meta:
return
return _get_nchannels_from_meta(self.meta)
@property
def ns(self):
""" :return: number of samples """
if not self.meta:
return
return int(np.round(self.meta.get('fileTimeSecs') * self.fs))
def read(self, nsel=slice(0, 10000), csel=slice(None), sync=True):
"""
Read from slices or indexes
        :param nsel: slice or sample indices
        :param csel: slice or channel indices
        :param sync: if True, also return the sync trace
        :return: float32 array (and the sync trace when sync=True)
"""
darray = np.float32(self._raw[nsel, csel])
darray *= self.channel_conversion_sample2v[self.type][csel]
if sync:
return darray, self.read_sync(nsel)
else:
return darray
def read_samples(self, first_sample=0, last_sample=10000, channels=None):
"""
        Reads all channels from first_sample to last_sample, following the numpy slicing convention:
        sglx.read_samples(first_sample=0, last_sample=100) is equivalent to slicing the array D
        as D[0:100, :], where the first axis represents time and the second the channels.
:param first_sample: first sample to be read, python slice-wise
:param last_sample: last sample to be read, python slice-wise
:param channels: slice or numpy array of indices
        :return: float32 array scaled to volts, and the sync trace
"""
if channels is None:
channels = slice(None)
return self.read(slice(first_sample, last_sample), channels)
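    # Usage sketch (the path below is hypothetical): read one second of an AP
    # band recording in volts together with its sync trace.
    #   sr = Reader('/data/session/raw_ephys_data/probe00_g0_t0.imec0.ap.bin')
    #   data, sync = sr.read_samples(first_sample=0, last_sample=int(sr.fs))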
def read_sync_digital(self, _slice=slice(0, 10000)):
"""
Reads only the digital sync trace at specified samples using slicing syntax
>>> sync_samples = sr.read_sync_digital(slice(0,10000))
"""
if not self.meta:
_logger.warning('Sync trace not labeled in metadata. Assuming last trace')
return split_sync(self._raw[_slice, _get_sync_trace_indices_from_meta(self.meta)])
def read_sync_analog(self, _slice=slice(0, 10000)):
"""
Reads only the analog sync traces at specified samples using slicing syntax
>>> sync_samples = sr.read_sync_analog(slice(0,10000))
"""
if not self.meta:
return
csel = _get_analog_sync_trace_indices_from_meta(self.meta)
if not csel:
return
else:
return self.read(nsel=_slice, csel=csel, sync=False)
def read_sync(self, _slice=slice(0, 10000), threshold=1.2):
"""
        Reads all sync traces. Converts analog traces to digital with the selected threshold and appends them to the array
:param _slice: samples slice
:param threshold: (V) threshold for front detection, defaults to 1.2 V
:return: int8 array
"""
digital = self.read_sync_digital(_slice)
analog = self.read_sync_analog(_slice)
if analog is None:
return digital
analog[np.where(analog < threshold)] = 0
analog[np.where(analog >= threshold)] = 1
return np.concatenate((digital, np.int8(analog)), axis=1)
def compress_file(self, keep_original=True, **kwargs):
"""
        Compresses the binary file to mtscomp's *.cbin format
:param keep_original: defaults True. If False, the original uncompressed file is deleted
and the current spikeglx.Reader object is modified in place
:param kwargs:
:return: pathlib.Path of the compressed *.cbin file
"""
file_tmp = self.file_bin.with_suffix('.cbin_tmp')
assert not self.is_mtscomp
mtscomp.compress(self.file_bin,
out=file_tmp,
outmeta=self.file_bin.with_suffix('.ch'),
sample_rate=self.fs,
n_channels=self.nc,
dtype=np.int16,
**kwargs)
file_out = file_tmp.with_suffix('.cbin')
file_tmp.rename(file_out)
if not keep_original:
self.file_bin.unlink()
self.file_bin = file_out
return file_out
def decompress_file(self, keep_original=True, **kwargs):
"""
Decompresses a mtscomp file
:param keep_original: defaults True. If False, the original compressed file (input)
is deleted and the current spikeglx.Reader object is modified in place
NB: This is not equivalent to overwrite (which replaces the output file)
:return: pathlib.Path of the decompressed *.bin file
"""
if 'out' not in kwargs:
kwargs['out'] = self.file_bin.with_suffix('.bin')
assert self.is_mtscomp
mtscomp.decompress(self.file_bin, self.file_bin.with_suffix('.ch'), **kwargs)
if not keep_original:
self.file_bin.unlink()
self.file_bin.with_suffix('.ch').unlink()
self.file_bin = kwargs['out']
return kwargs['out']
def verify_hash(self):
"""
Computes SHA-1 hash and returns True if it matches metadata, False otherwise
:return: boolean
"""
if self.is_mtscomp:
with open(self.file_bin.with_suffix('.ch')) as fid:
mtscomp_params = json.load(fid)
sm = mtscomp_params.get('sha1_compressed', None)
if sm is None:
_logger.warning("SHA1 hash is not implemented for compressed ephys. To check "
"the spikeglx acquisition hash, uncompress the file first !")
return True
sm = sm.upper()
else:
sm = self.meta.fileSHA1
sc = hashfile.sha1(self.file_bin).upper()
if sm == sc:
log_func = _logger.info
else:
log_func = _logger.error
log_func(f"SHA1 metadata: {sm}")
log_func(f"SHA1 computed: {sc}")
return sm == sc
def read(sglx_file, first_sample=0, last_sample=10000):
"""
Function to read from a spikeglx binary file without instantiating the class.
Gets the meta-data as well.
>>> ibllib.io.spikeglx.read('/path/to/file.bin', first_sample=0, last_sample=1000)
    :param sglx_file: full path to the binary file to read
:param first_sample: first sample to be read, python slice-wise
:param last_sample: last sample to be read, python slice-wise
:return: Data array, sync trace, meta-data
"""
sglxr = Reader(sglx_file)
D, sync = sglxr.read_samples(first_sample=first_sample, last_sample=last_sample)
return D, sync, sglxr.meta
def read_meta_data(md_file):
"""
    Reads the SpikeGLX metadata file and parses it into a dictionary
    Agnostic: does not make any assumption on the keys/content, it just parses key=values
    :param md_file: full path to the .meta file
    :return: Bunch (dictionary) of metadata values
"""
with open(md_file) as fid:
md = fid.read()
d = {}
for a in md.splitlines():
k, v = a.split('=')
# if all numbers, try to interpret the string
if v and re.fullmatch('[0-9,.]*', v) and v.count('.') < 2:
v = [float(val) for val in v.split(',')]
# scalars should not be nested
if len(v) == 1:
v = v[0]
# tildes in keynames removed
d[k.replace('~', '')] = v
d['neuropixelVersion'] = _get_neuropixel_version_from_meta(d)
d['serial'] = _get_serial_number_from_meta(d)
return Bunch(d)
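# Example (illustrative): a metadata line such as 'niSampRate=25000' is parsed
# to {'niSampRate': 25000.0}, 'snsMnMaXaDw=8,8,2,1' to a list of floats, while
# non-numeric values stay as raw strings and leading '~' is stripped from keys.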
def _get_serial_number_from_meta(md):
"""
Get neuropixel serial number from the metadata dictionary
"""
# imProbeSN for 3A, imDatPrb_sn for 3B2, None for nidq 3B2
serial = md.get('imProbeSN') or md.get('imDatPrb_sn')
if serial:
return int(serial)
def _get_neuropixel_version_from_meta(md):
"""
Get neuropixel version tag (3A, 3B1, 3B2) from the metadata dictionary
"""
if 'typeEnabled' in md.keys():
return '3A'
elif 'typeImEnabled' in md.keys() and 'typeNiEnabled' in md.keys():
if 'imDatPrb_port' in md.keys() and 'imDatPrb_slot' in md.keys():
return '3B2'
else:
return '3B1'
def _get_sync_trace_indices_from_meta(md):
"""
Returns a list containing indices of the sync traces in the original array
"""
typ = _get_type_from_meta(md)
ntr = int(_get_nchannels_from_meta(md))
if typ == 'nidq':
nsync = int(md.get('snsMnMaXaDw')[-1])
elif typ in ['lf', 'ap']:
nsync = int(md.get('snsApLfSy')[2])
return list(range(ntr - nsync, ntr))
def _get_analog_sync_trace_indices_from_meta(md):
"""
Returns a list containing indices of the sync traces in the original array
"""
typ = _get_type_from_meta(md)
if typ != 'nidq':
return []
tr = md.get('snsMnMaXaDw')
nsa = int(tr[-2])
return list(range(int(sum(tr[0:2])), int(sum(tr[0:2])) + nsa))
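# Example: for a nidq file with snsMnMaXaDw = [8, 8, 2, 1] (8 MN and 8 MA
# channels, 2 analog sync traces, 1 digital word), this returns [16, 17].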
def _get_nchannels_from_meta(md):
typ = _get_type_from_meta(md)
if typ == 'nidq':
return int(np.round(np.sum(md.get('snsMnMaXaDw'))))
elif typ in ['lf', 'ap']:
return int(np.round(sum(md.get('snsApLfSy'))))
def _get_fs_from_meta(md):
if md.get('typeThis') == 'imec':
return md.get('imSampRate')
else:
return md.get('niSampRate')
def _get_type_from_meta(md):
"""
Get neuropixel data type (ap, lf or nidq) from metadata
"""
snsApLfSy = md.get('snsApLfSy', [-1, -1, -1])
if snsApLfSy[0] == 0 and snsApLfSy[1] != 0:
return 'lf'
elif snsApLfSy[0] != 0 and snsApLfSy[1] == 0:
return 'ap'
elif snsApLfSy == [-1, -1, -1] and md.get('typeThis', None) == 'nidq':
return 'nidq'
def _map_channels_from_meta(meta_data):
"""
Interpret the meta data string to extract an array of channel positions along the shank
:param meta_data: dictionary output from spikeglx.read_meta_data
:return: dictionary of arrays 'shank', 'col', 'row', 'flag', one value per active site
"""
if 'snsShankMap' in meta_data.keys():
chmap = re.findall(r'([0-9]*:[0-9]*:[0-9]*:[0-9]*)', meta_data['snsShankMap'])
# for digital nidq types, the key exists but does not contain any information
if not chmap:
return {'shank': None, 'col': None, 'row': None, 'flag': None}
# shank#, col#, row#, drawflag
    # (nb: drawflag is 1 if the site should be drawn and considered in the spatial average)
chmap = np.array([np.float32(cm.split(':')) for cm in chmap])
return {k: chmap[:, v] for (k, v) in {'shank': 0, 'col': 1, 'row': 2, 'flag': 3}.items()}
def _conversion_sample2v_from_meta(meta_data):
"""
Interpret the meta data to extract an array of conversion factors for each channel
so the output data is in Volts
Conversion factor is: int2volt / channelGain
For Lf/Ap interpret the gain string from metadata
For Nidq, repmat the gains from the trace counts in `snsMnMaXaDw`
:param meta_data: dictionary output from spikeglx.read_meta_data
:return: numpy array with one gain value per channel
"""
def int2volts(md):
""" :return: Conversion scalar to Volts. Needs to be combined with channel gains """
if md.get('typeThis', None) == 'imec':
return md.get('imAiRangeMax') / 512
else:
return md.get('niAiRangeMax') / 32768
int2volt = int2volts(meta_data)
# interprets the gain value from the metadata header:
if 'imroTbl' in meta_data.keys(): # binary from the probes: ap or lf
sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32)
        # imroTbl has 384 entries regardless of the number of channels saved, so index by n_chn
        n_chn = _get_nchannels_from_meta(meta_data) - 1
        # the sync traces have no gain entry, so unit gains are appended for broadcast ops
gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', meta_data['imroTbl'])[:n_chn]
out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) *
int2volt, sy_gain)),
'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) *
int2volt, sy_gain))}
elif 'niMNGain' in meta_data.keys(): # binary from nidq
gain = np.r_[
np.ones(int(meta_data['snsMnMaXaDw'][0],)) / meta_data['niMNGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][1],)) / meta_data['niMAGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt, # no gain for analog sync
np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]),))] # no unit for digital sync
out = {'nidq': gain}
return out
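# Worked example (values illustrative): for an imec probe with imAiRangeMax = 0.6
# and an AP channel gain of 500, the conversion factor is
# (0.6 / 512) / 500 ~= 2.34e-6 V per bit; appended sync traces get a unit gain.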
def split_sync(sync_tr):
"""
    The synchronization channels are stored as single bits; this splits the original
    int16 channel into 16 single-bit channels
:param sync_tr: numpy vector: samples of synchronisation trace
:return: int8 numpy array of 16 channels, 1 column per sync trace
"""
sync_tr = np.int16(np.copy(sync_tr))
out = np.unpackbits(sync_tr.view(np.uint8)).reshape(sync_tr.size, 16)
out = np.flip(np.roll(out, 8, axis=1), axis=1)
return np.int8(out)
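# Example: split_sync(np.array([5], dtype=np.int16)) yields a (1, 16) int8 array
# whose columns are the bits of 5 from LSB to MSB, i.e. [1, 0, 1, 0, 0, ...].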
def get_neuropixel_version_from_folder(session_path):
ephys_files = glob_ephys_files(session_path)
return get_neuropixel_version_from_files(ephys_files)
def get_neuropixel_version_from_files(ephys_files):
if any([ef.get('nidq') for ef in ephys_files]):
return '3B'
else:
return '3A'
def glob_ephys_files(session_path, suffix='.meta', ext='bin', recursive=True, bin_exists=True):
"""
    From an arbitrary folder (usually a session folder), gets the ap and lf files and the
    labels associated with the subfolders where they are.
    The expected folder tree is:
├── 3A
│ ├── imec0
│ ├── sync_testing_g0_t0.imec0.ap.bin
│ │ └── sync_testing_g0_t0.imec0.lf.bin
│ └── imec1
│ ├── sync_testing_g0_t0.imec1.ap.bin
│ └── sync_testing_g0_t0.imec1.lf.bin
└── 3B
├── sync_testing_g0_t0.nidq.bin
├── imec0
│ ├── sync_testing_g0_t0.imec0.ap.bin
│ └── sync_testing_g0_t0.imec0.lf.bin
└── imec1
├── sync_testing_g0_t0.imec1.ap.bin
└── sync_testing_g0_t0.imec1.lf.bin
    :param session_path: folder, string or pathlib.Path
    :param suffix: suffix used to glob for the .ap files (defaults to '.meta')
    :param ext: file extension to look for, default 'bin' but could also be 'meta' or 'ch'
    :param recursive: if True, search subfolders recursively
    :param bin_exists: if True, skip entries whose binary file is missing
    :returns: a list of dictionaries with keys 'ap': apfile, 'lf': lffile and 'label'
"""
def get_label(raw_ephys_apfile):
if raw_ephys_apfile.parts[-2] != 'raw_ephys_data':
return raw_ephys_apfile.parts[-2]
else:
return ''
recurse = '**/' if recursive else ''
ephys_files = []
for raw_ephys_file in Path(session_path).glob(f'{recurse}*.ap{suffix}'):
raw_ephys_apfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'), None)
if not raw_ephys_apfile and bin_exists:
continue
elif not raw_ephys_apfile and ext != 'bin':
continue
elif not bin_exists and ext == 'bin':
raw_ephys_apfile = raw_ephys_file.with_suffix('.bin')
# first get the ap file
ephys_files.extend([Bunch({'label': None, 'ap': None, 'lf': None, 'path': None})])
ephys_files[-1].ap = raw_ephys_apfile
# then get the corresponding lf file if it exists
lf_file = raw_ephys_apfile.parent / raw_ephys_apfile.name.replace('.ap.', '.lf.')
ephys_files[-1].lf = next(lf_file.parent.glob(lf_file.stem + f'.*{ext}'), None)
# finally, the label is the current directory except if it is bare in raw_ephys_data
ephys_files[-1].label = get_label(raw_ephys_apfile)
ephys_files[-1].path = raw_ephys_apfile.parent
    # for 3B probes, we also need to get the nidq dataset type
for raw_ephys_file in Path(session_path).rglob(f'{recurse}*.nidq{suffix}'):
raw_ephys_nidqfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'),
None)
if not bin_exists and ext == 'bin':
raw_ephys_nidqfile = raw_ephys_file.with_suffix('.bin')
ephys_files.extend([Bunch({'label': get_label(raw_ephys_file),
'nidq': raw_ephys_nidqfile,
'path': raw_ephys_file.parent})])
return ephys_files
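# Usage sketch (the session path is hypothetical):
#   efiles = glob_ephys_files('/data/subject/2020-01-01/001')
#   ap_files = [ef.ap for ef in efiles if ef.get('ap')]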
def _mock_spikeglx_file(mock_bin_file, meta_file, ns, nc, sync_depth,
random=False, int2volts=0.6 / 32768, corrupt=False):
"""
For testing purposes, create a binary file with sync pulses to test reading and extraction
"""
meta_file = Path(meta_file)
mock_path_bin = Path(mock_bin_file)
mock_path_meta = mock_path_bin.with_suffix('.meta')
md = read_meta_data(meta_file)
assert meta_file != mock_path_meta
fs = _get_fs_from_meta(md)
fid_source = open(meta_file)
fid_target = open(mock_path_meta, 'w+')
line = fid_source.readline()
while line:
line = fid_source.readline()
if line.startswith('fileSizeBytes'):
line = f'fileSizeBytes={ns * nc * 2}\n'
if line.startswith('fileTimeSecs'):
if corrupt:
line = f'fileTimeSecs={ns / fs + 1.8324}\n'
else:
line = f'fileTimeSecs={ns / fs}\n'
fid_target.write(line)
fid_source.close()
fid_target.close()
if random:
D = np.random.randint(-32767, 32767, size=(ns, nc), dtype=np.int16)
else: # each channel as an int of chn + 1
D = np.tile(np.int16((np.arange(nc) + 1) / int2volts), (ns, 1))
D[0:16, :] = 0
    # the last channel is the sync trace, which we fill with one pulse per bit (powers of two)
sync = np.int16(2 ** np.float32(np.arange(-1, sync_depth)))
D[:, -1] = 0
D[:sync.size, -1] = sync
with open(mock_path_bin, 'w+') as fid:
D.tofile(fid)
return {'bin_file': mock_path_bin, 'ns': ns, 'nc': nc, 'sync_depth': sync_depth, 'D': D}
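# Note on the sync encoding above: with sync_depth=3 the sync column starts
# [0, 1, 2, 4], one sample per bit (2**-1 truncates to 0 as int16).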
def get_hardware_config(config_file):
"""
Reads the neuropixel_wirings.json file containing sync mapping and parameters
:param config_file: folder or json file
:return: dictionary or None
"""
config_file = Path(config_file)
if config_file.is_dir():
config_file = list(config_file.glob('*.wiring.json'))
if config_file:
config_file = config_file[0]
if not config_file or not config_file.exists():
return
with open(config_file) as fid:
par = json.loads(fid.read())
return par
def _sync_map_from_hardware_config(hardware_config):
"""
    :param hardware_config: dictionary from the json read of neuropixel_wirings.json
:return: dictionary where key names refer to object and values to sync channel index
"""
pin_out = neuropixel.SYNC_PIN_OUT[hardware_config['SYSTEM']]
sync_map = {hardware_config['SYNC_WIRING_DIGITAL'][pin]: pin_out[pin]
for pin in hardware_config['SYNC_WIRING_DIGITAL']
if pin_out[pin] is not None}
analog = hardware_config.get('SYNC_WIRING_ANALOG')
if analog:
sync_map.update({analog[pin]: int(pin[2:]) + 16 for pin in analog})
return sync_map
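# Example (wiring values illustrative): with SYNC_WIRING_DIGITAL = {'P0.0': 'bpod'}
# and pin_out['P0.0'] == 0, the map contains {'bpod': 0}; an analog entry
# {'AI1': 'photodiode'} adds {'photodiode': 17} (channel 1 + 16 digital bits).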
def get_sync_map(folder_ephys):
hc = get_hardware_config(folder_ephys)
if not hc:
_logger.warning(f"No channel map for {str(folder_ephys)}")
return None
else:
return _sync_map_from_hardware_config(hc)
| import json
import logging
from pathlib import Path
import re
import numpy as np
import mtscomp
from brainbox.core import Bunch
from ibllib.ephys import neuropixel as neuropixel
from ibllib.io import hashfile
SAMPLE_SIZE = 2 # int16
DEFAULT_BATCH_SIZE = 1e6
_logger = logging.getLogger('ibllib')
class Reader:
"""
Class for SpikeGLX reading purposes
Some format description was found looking at the Matlab SDK here
https://github.com/billkarsh/SpikeGLX/blob/master/MATLAB-SDK/DemoReadSGLXData.m
"""
def __init__(self, sglx_file):
self.file_bin = Path(sglx_file)
self.nbytes = self.file_bin.stat().st_size
file_meta_data = Path(sglx_file).with_suffix('.meta')
if not file_meta_data.exists():
self.file_meta_data = None
self.meta = None
self.channel_conversion_sample2v = 1
_logger.warning(str(sglx_file) + " : no metadata file found. Very limited support")
return
# normal case we continue reading and interpreting the metadata file
self.file_meta_data = file_meta_data
self.meta = read_meta_data(file_meta_data)
self.channel_conversion_sample2v = _conversion_sample2v_from_meta(self.meta)
# if we are not looking at a compressed file, use a memmap, otherwise instantiate mtscomp
if self.is_mtscomp:
self._raw = mtscomp.Reader()
self._raw.open(self.file_bin, self.file_bin.with_suffix('.ch'))
else:
if self.nc * self.ns * 2 != self.nbytes:
ftsec = self.file_bin.stat().st_size / 2 / self.nc / self.fs
_logger.warning(f"{sglx_file} : meta data and filesize do not checkout\n"
f"File size: expected {self.meta['fileSizeBytes']},"
f" actual {self.file_bin.stat().st_size}\n"
f"File duration: expected {self.meta['fileTimeSecs']},"
f" actual {ftsec}\n"
f"Will attempt to fudge the meta-data information.")
self.meta['fileTimeSecs'] = ftsec
self._raw = np.memmap(sglx_file, dtype='int16', mode='r', shape=(self.ns, self.nc))
def __getitem__(self, item):
if isinstance(item, int) or isinstance(item, slice):
return self.read(nsel=item, sync=False)
elif len(item) == 2:
return self.read(nsel=item[0], csel=item[1], sync=False)
@property
def shape(self):
return self.ns, self.nc
@property
def is_mtscomp(self):
return 'cbin' in self.file_bin.suffix
@property
def version(self):
""":return: """
if not self.meta:
return None
return _get_neuropixel_version_from_meta(self.meta)
@property
def type(self):
""":return: ap, lf or nidq. Useful to index dictionaries """
if not self.meta:
return 0
return _get_type_from_meta(self.meta)
@property
def fs(self):
""" :return: sampling frequency (Hz) """
if not self.meta:
return 1
return _get_fs_from_meta(self.meta)
@property
def nc(self):
""" :return: number of channels """
if not self.meta:
return
return _get_nchannels_from_meta(self.meta)
@property
def ns(self):
""" :return: number of samples """
if not self.meta:
return
return int(np.round(self.meta.get('fileTimeSecs') * self.fs))
def read(self, nsel=slice(0, 10000), csel=slice(None), sync=True):
"""
Read from slices or indexes
        :param nsel: slice or sample indices
        :param csel: slice or channel indices
        :param sync: if True, also return the sync trace
        :return: float32 array (and the sync trace when sync=True)
"""
darray = np.float32(self._raw[nsel, csel])
darray *= self.channel_conversion_sample2v[self.type][csel]
if sync:
return darray, self.read_sync(nsel)
else:
return darray
def read_samples(self, first_sample=0, last_sample=10000, channels=None):
"""
        Reads all channels from first_sample to last_sample, following the numpy slicing convention:
        sglx.read_samples(first_sample=0, last_sample=100) is equivalent to slicing the array D
        as D[0:100, :], where the first axis represents time and the second the channels.
:param first_sample: first sample to be read, python slice-wise
:param last_sample: last sample to be read, python slice-wise
:param channels: slice or numpy array of indices
        :return: float32 array scaled to volts, and the sync trace
"""
if channels is None:
channels = slice(None)
return self.read(slice(first_sample, last_sample), channels)
def read_sync_digital(self, _slice=slice(0, 10000)):
"""
Reads only the digital sync trace at specified samples using slicing syntax
>>> sync_samples = sr.read_sync_digital(slice(0,10000))
"""
if not self.meta:
_logger.warning('Sync trace not labeled in metadata. Assuming last trace')
return split_sync(self._raw[_slice, _get_sync_trace_indices_from_meta(self.meta)])
def read_sync_analog(self, _slice=slice(0, 10000)):
"""
Reads only the analog sync traces at specified samples using slicing syntax
>>> sync_samples = sr.read_sync_analog(slice(0,10000))
"""
if not self.meta:
return
csel = _get_analog_sync_trace_indices_from_meta(self.meta)
if not csel:
return
else:
return self.read(nsel=_slice, csel=csel, sync=False)
def read_sync(self, _slice=slice(0, 10000), threshold=1.2):
"""
        Reads all sync traces. Converts analog traces to digital with the selected threshold and appends them to the array
:param _slice: samples slice
:param threshold: (V) threshold for front detection, defaults to 1.2 V
:return: int8 array
"""
digital = self.read_sync_digital(_slice)
analog = self.read_sync_analog(_slice)
if analog is None:
return digital
analog[np.where(analog < threshold)] = 0
analog[np.where(analog >= threshold)] = 1
return np.concatenate((digital, np.int8(analog)), axis=1)
def compress_file(self, keep_original=True, **kwargs):
"""
        Compresses the binary file to mtscomp's *.cbin format
:param keep_original: defaults True. If False, the original uncompressed file is deleted
and the current spikeglx.Reader object is modified in place
:param kwargs:
:return: pathlib.Path of the compressed *.cbin file
"""
file_tmp = self.file_bin.with_suffix('.cbin_tmp')
assert not self.is_mtscomp
mtscomp.compress(self.file_bin,
out=file_tmp,
outmeta=self.file_bin.with_suffix('.ch'),
sample_rate=self.fs,
n_channels=self.nc,
dtype=np.int16,
**kwargs)
file_out = file_tmp.with_suffix('.cbin')
file_tmp.rename(file_out)
if not keep_original:
self.file_bin.unlink()
self.file_bin = file_out
return file_out
def decompress_file(self, keep_original=True, **kwargs):
"""
Decompresses a mtscomp file
:param keep_original: defaults True. If False, the original compressed file (input)
is deleted and the current spikeglx.Reader object is modified in place
NB: This is not equivalent to overwrite (which replaces the output file)
:return: pathlib.Path of the decompressed *.bin file
"""
if 'out' not in kwargs:
kwargs['out'] = self.file_bin.with_suffix('.bin')
assert self.is_mtscomp
mtscomp.decompress(self.file_bin, self.file_bin.with_suffix('.ch'), **kwargs)
if not keep_original:
self.file_bin.unlink()
self.file_bin.with_suffix('.ch').unlink()
self.file_bin = kwargs['out']
return kwargs['out']
def verify_hash(self):
"""
Computes SHA-1 hash and returns True if it matches metadata, False otherwise
:return: boolean
"""
if self.is_mtscomp:
with open(self.file_bin.with_suffix('.ch')) as fid:
mtscomp_params = json.load(fid)
sm = mtscomp_params.get('sha1_compressed', None)
if sm is None:
_logger.warning("SHA1 hash is not implemented for compressed ephys. To check "
"the spikeglx acquisition hash, uncompress the file first !")
return True
sm = sm.upper()
else:
sm = self.meta.fileSHA1
sc = hashfile.sha1(self.file_bin).upper()
if sm == sc:
log_func = _logger.info
else:
log_func = _logger.error
log_func(f"SHA1 metadata: {sm}")
log_func(f"SHA1 computed: {sc}")
return sm == sc
def read(sglx_file, first_sample=0, last_sample=10000):
"""
Function to read from a spikeglx binary file without instantiating the class.
Gets the meta-data as well.
>>> ibllib.io.spikeglx.read('/path/to/file.bin', first_sample=0, last_sample=1000)
    :param sglx_file: full path to the binary file to read
:param first_sample: first sample to be read, python slice-wise
:param last_sample: last sample to be read, python slice-wise
:return: Data array, sync trace, meta-data
"""
sglxr = Reader(sglx_file)
D, sync = sglxr.read_samples(first_sample=first_sample, last_sample=last_sample)
return D, sync, sglxr.meta
def read_meta_data(md_file):
"""
    Reads the SpikeGLX metadata file and parses it into a dictionary
    Agnostic: does not make any assumption on the keys/content, it just parses key=values
    :param md_file: full path to the .meta file
    :return: Bunch (dictionary) of metadata values
"""
with open(md_file) as fid:
md = fid.read()
d = {}
for a in md.splitlines():
k, v = a.split('=')
# if all numbers, try to interpret the string
if v and re.fullmatch('[0-9,.]*', v) and v.count('.') < 2:
v = [float(val) for val in v.split(',')]
# scalars should not be nested
if len(v) == 1:
v = v[0]
# tildes in keynames removed
d[k.replace('~', '')] = v
d['neuropixelVersion'] = _get_neuropixel_version_from_meta(d)
d['serial'] = _get_serial_number_from_meta(d)
return Bunch(d)
def _get_serial_number_from_meta(md):
"""
Get neuropixel serial number from the metadata dictionary
"""
# imProbeSN for 3A, imDatPrb_sn for 3B2, None for nidq 3B2
serial = md.get('imProbeSN') or md.get('imDatPrb_sn')
if serial:
return int(serial)
def _get_neuropixel_version_from_meta(md):
"""
Get neuropixel version tag (3A, 3B1, 3B2) from the metadata dictionary
"""
if 'typeEnabled' in md.keys():
return '3A'
elif 'typeImEnabled' in md.keys() and 'typeNiEnabled' in md.keys():
if 'imDatPrb_port' in md.keys() and 'imDatPrb_slot' in md.keys():
return '3B2'
else:
return '3B1'
def _get_sync_trace_indices_from_meta(md):
"""
Returns a list containing indices of the sync traces in the original array
"""
typ = _get_type_from_meta(md)
ntr = int(_get_nchannels_from_meta(md))
if typ == 'nidq':
nsync = int(md.get('snsMnMaXaDw')[-1])
elif typ in ['lf', 'ap']:
nsync = int(md.get('snsApLfSy')[2])
return list(range(ntr - nsync, ntr))
def _get_analog_sync_trace_indices_from_meta(md):
"""
Returns a list containing indices of the sync traces in the original array
"""
typ = _get_type_from_meta(md)
if typ != 'nidq':
return []
tr = md.get('snsMnMaXaDw')
nsa = int(tr[-2])
return list(range(int(sum(tr[0:2])), int(sum(tr[0:2])) + nsa))
def _get_nchannels_from_meta(md):
typ = _get_type_from_meta(md)
if typ == 'nidq':
return int(np.round(np.sum(md.get('snsMnMaXaDw'))))
elif typ in ['lf', 'ap']:
return int(np.round(sum(md.get('snsApLfSy'))))
def _get_fs_from_meta(md):
if md.get('typeThis') == 'imec':
return md.get('imSampRate')
else:
return md.get('niSampRate')
def _get_type_from_meta(md):
"""
Get neuropixel data type (ap, lf or nidq) from metadata
"""
snsApLfSy = md.get('snsApLfSy', [-1, -1, -1])
if snsApLfSy[0] == 0 and snsApLfSy[1] != 0:
return 'lf'
elif snsApLfSy[0] != 0 and snsApLfSy[1] == 0:
return 'ap'
elif snsApLfSy == [-1, -1, -1] and md.get('typeThis', None) == 'nidq':
return 'nidq'
def _map_channels_from_meta(meta_data):
"""
Interpret the meta data string to extract an array of channel positions along the shank
:param meta_data: dictionary output from spikeglx.read_meta_data
:return: dictionary of arrays 'shank', 'col', 'row', 'flag', one value per active site
"""
if 'snsShankMap' in meta_data.keys():
chmap = re.findall(r'([0-9]*:[0-9]*:[0-9]*:[0-9]*)', meta_data['snsShankMap'])
# for digital nidq types, the key exists but does not contain any information
if not chmap:
return {'shank': None, 'col': None, 'row': None, 'flag': None}
# shank#, col#, row#, drawflag
    # (nb: drawflag is 1 if the site should be drawn and considered in the spatial average)
chmap = np.array([np.float32(cm.split(':')) for cm in chmap])
return {k: chmap[:, v] for (k, v) in {'shank': 0, 'col': 1, 'row': 2, 'flag': 3}.items()}
def _conversion_sample2v_from_meta(meta_data):
"""
Interpret the meta data to extract an array of conversion factors for each channel
so the output data is in Volts
Conversion factor is: int2volt / channelGain
For Lf/Ap interpret the gain string from metadata
For Nidq, repmat the gains from the trace counts in `snsMnMaXaDw`
:param meta_data: dictionary output from spikeglx.read_meta_data
:return: numpy array with one gain value per channel
"""
def int2volts(md):
""" :return: Conversion scalar to Volts. Needs to be combined with channel gains """
if md.get('typeThis', None) == 'imec':
return md.get('imAiRangeMax') / 512
else:
return md.get('niAiRangeMax') / 32768
int2volt = int2volts(meta_data)
# interprets the gain value from the metadata header:
if 'imroTbl' in meta_data.keys(): # binary from the probes: ap or lf
sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32)
        # imroTbl has 384 entries regardless of the number of channels saved, so index by n_chn
        n_chn = _get_nchannels_from_meta(meta_data) - 1
        # the sync traces have no gain entry, so unit gains are appended for broadcast ops
gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', meta_data['imroTbl'])[:n_chn]
out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) *
int2volt, sy_gain)),
'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) *
int2volt, sy_gain))}
elif 'niMNGain' in meta_data.keys(): # binary from nidq
gain = np.r_[
np.ones(int(meta_data['snsMnMaXaDw'][0],)) / meta_data['niMNGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][1],)) / meta_data['niMAGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt, # no gain for analog sync
np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]),))] # no unit for digital sync
out = {'nidq': gain}
return out
def split_sync(sync_tr):
"""
    The synchronization channels are stored as single bits; this splits the original
    int16 channel into 16 single-bit channels
:param sync_tr: numpy vector: samples of synchronisation trace
:return: int8 numpy array of 16 channels, 1 column per sync trace
"""
sync_tr = np.int16(np.copy(sync_tr))
out = np.unpackbits(sync_tr.view(np.uint8)).reshape(sync_tr.size, 16)
out = np.flip(np.roll(out, 8, axis=1), axis=1)
return np.int8(out)
def get_neuropixel_version_from_folder(session_path):
ephys_files = glob_ephys_files(session_path)
return get_neuropixel_version_from_files(ephys_files)
def get_neuropixel_version_from_files(ephys_files):
if any([ef.get('nidq') for ef in ephys_files]):
return '3B'
else:
return '3A'
def glob_ephys_files(session_path, suffix='.meta', ext='bin', recursive=True, bin_exists=True):
"""
    From an arbitrary folder (usually a session folder), gets the ap and lf files and the
    labels associated with the subfolders where they are.
    The expected folder tree is:
├── 3A
│ ├── imec0
│ ├── sync_testing_g0_t0.imec0.ap.bin
│ │ └── sync_testing_g0_t0.imec0.lf.bin
│ └── imec1
│ ├── sync_testing_g0_t0.imec1.ap.bin
│ └── sync_testing_g0_t0.imec1.lf.bin
└── 3B
├── sync_testing_g0_t0.nidq.bin
├── imec0
│ ├── sync_testing_g0_t0.imec0.ap.bin
│ └── sync_testing_g0_t0.imec0.lf.bin
└── imec1
├── sync_testing_g0_t0.imec1.ap.bin
└── sync_testing_g0_t0.imec1.lf.bin
    :param session_path: folder, string or pathlib.Path
    :param suffix: suffix used to glob for the .ap files (defaults to '.meta')
    :param ext: file extension to look for, default 'bin' but could also be 'meta' or 'ch'
    :param recursive: if True, search subfolders recursively
    :param bin_exists: if True, skip entries whose binary file is missing
    :returns: a list of dictionaries with keys 'ap': apfile, 'lf': lffile and 'label'
"""
def get_label(raw_ephys_apfile):
if raw_ephys_apfile.parts[-2] != 'raw_ephys_data':
return raw_ephys_apfile.parts[-2]
else:
return ''
recurse = '**/' if recursive else ''
ephys_files = []
for raw_ephys_file in Path(session_path).glob(f'{recurse}*.ap{suffix}'):
raw_ephys_apfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'), None)
if not raw_ephys_apfile and bin_exists:
continue
elif not raw_ephys_apfile and ext != 'bin':
continue
elif not bin_exists and ext == 'bin':
raw_ephys_apfile = raw_ephys_file.with_suffix('.bin')
# first get the ap file
ephys_files.extend([Bunch({'label': None, 'ap': None, 'lf': None, 'path': None})])
ephys_files[-1].ap = raw_ephys_apfile
# then get the corresponding lf file if it exists
lf_file = raw_ephys_apfile.parent / raw_ephys_apfile.name.replace('.ap.', '.lf.')
ephys_files[-1].lf = next(lf_file.parent.glob(lf_file.stem + f'.*{ext}'), None)
# finally, the label is the current directory except if it is bare in raw_ephys_data
ephys_files[-1].label = get_label(raw_ephys_apfile)
ephys_files[-1].path = raw_ephys_apfile.parent
    # for 3B probes, we also need to get the nidq dataset type
for raw_ephys_file in Path(session_path).rglob(f'{recurse}*.nidq{suffix}'):
raw_ephys_nidqfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'),
None)
if not bin_exists and ext == 'bin':
raw_ephys_nidqfile = raw_ephys_file.with_suffix('.bin')
ephys_files.extend([Bunch({'label': get_label(raw_ephys_file),
'nidq': raw_ephys_nidqfile,
'path': raw_ephys_file.parent})])
return ephys_files
def _mock_spikeglx_file(mock_bin_file, meta_file, ns, nc, sync_depth,
random=False, int2volts=0.6 / 32768, corrupt=False):
"""
For testing purposes, create a binary file with sync pulses to test reading and extraction
"""
meta_file = Path(meta_file)
mock_path_bin = Path(mock_bin_file)
mock_path_meta = mock_path_bin.with_suffix('.meta')
md = read_meta_data(meta_file)
assert meta_file != mock_path_meta
fs = _get_fs_from_meta(md)
fid_source = open(meta_file)
fid_target = open(mock_path_meta, 'w+')
line = fid_source.readline()
while line:
line = fid_source.readline()
if line.startswith('fileSizeBytes'):
line = f'fileSizeBytes={ns * nc * 2}\n'
if line.startswith('fileTimeSecs'):
if corrupt:
line = f'fileTimeSecs={ns / fs + 1.8324}\n'
else:
line = f'fileTimeSecs={ns / fs}\n'
fid_target.write(line)
fid_source.close()
fid_target.close()
if random:
D = np.random.randint(-32767, 32767, size=(ns, nc), dtype=np.int16)
else: # each channel as an int of chn + 1
D = np.tile(np.int16((np.arange(nc) + 1) / int2volts), (ns, 1))
D[0:16, :] = 0
    # the last channel is the sync trace, which we fill with one pulse per bit (powers of two)
sync = np.int16(2 ** np.float32(np.arange(-1, sync_depth)))
D[:, -1] = 0
D[:sync.size, -1] = sync
with open(mock_path_bin, 'w+') as fid:
D.tofile(fid)
return {'bin_file': mock_path_bin, 'ns': ns, 'nc': nc, 'sync_depth': sync_depth, 'D': D}
def get_hardware_config(config_file):
"""
Reads the neuropixel_wirings.json file containing sync mapping and parameters
:param config_file: folder or json file
:return: dictionary or None
"""
config_file = Path(config_file)
if config_file.is_dir():
config_file = list(config_file.glob('*.wiring.json'))
if config_file:
config_file = config_file[0]
if not config_file or not config_file.exists():
return
with open(config_file) as fid:
par = json.loads(fid.read())
return par
def _sync_map_from_hardware_config(hardware_config):
"""
    :param hardware_config: dictionary from the json read of neuropixel_wirings.json
:return: dictionary where key names refer to object and values to sync channel index
"""
pin_out = neuropixel.SYNC_PIN_OUT[hardware_config['SYSTEM']]
sync_map = {hardware_config['SYNC_WIRING_DIGITAL'][pin]: pin_out[pin]
for pin in hardware_config['SYNC_WIRING_DIGITAL']
if pin_out[pin] is not None}
analog = hardware_config.get('SYNC_WIRING_ANALOG')
if analog:
sync_map.update({analog[pin]: int(pin[2:]) + 16 for pin in analog})
return sync_map
def get_sync_map(folder_ephys):
hc = get_hardware_config(folder_ephys)
if not hc:
_logger.warning(f"No channel map for {str(folder_ephys)}")
return None
else:
return _sync_map_from_hardware_config(hc)
|
from datetime import datetime, timedelta
from pathlib import Path
from pytils.translit import slugify
from appliances.data import (COMPANY_NAME, MANAGER_NAME, CONTACT_PHONE,
ADDRESS, CATEGORY, GOODS_TYPE, AD_TYPE,
get_description, CONDITION, PHOTO_STORAGE,
START_TIME)
from appliances.root_xml import save_root_xml
from utils import (get_list_of_dicts_from_csv_file, get_datetime,
get_repr_world_time)
def main():
ad_dict_list = []
products = get_list_of_dicts_from_csv_file('Товары.csv')
current_date = get_datetime(START_TIME)
    for product in products:
        if len(product['Заголовок']) < 20:
            title = f'Уплотнитель двери холодильника {product['Заголовок']}'
        elif len(product['Заголовок']) < 26:
            title = f'Уплотнитель холодильника {product['Заголовок']}'
        else:
            title = f'Уплотнитель для {product['Заголовок']}'
        images = [''.join([PHOTO_STORAGE, x])
                  for x in product['Ссылки на картинки'].split(', ')]
ad_dict_list.append(
{
'Id': f'{datetime.now().strftime('%Y-%m')}-{slugify(title)}',
'DateBegin': get_repr_world_time(current_date),
'ListingFee': 'Package',
'AdStatus': 'Free',
'ManagerName': MANAGER_NAME,
'ContactPhone': CONTACT_PHONE,
'Address': ADDRESS,
'Category': CATEGORY,
'GoodsType': GOODS_TYPE,
'AdType': AD_TYPE,
'Title': title,
                'Description': get_description(product['Заголовок']),
                'Price': product['Цена'],
'Condition': CONDITION,
'Images': images,
}
)
        current_date += timedelta(minutes=45)
        if current_date.hour >= 20 and current_date.minute > 0:
            # advance by a full day so month boundaries are handled correctly
            current_date = (current_date + timedelta(days=1)).replace(hour=8, minute=0)
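        # Example (illustrative): a slot at 19:50 advances to 20:35, which trips
        # the 20:00 cut-off, so the next ad is scheduled for 08:00 the next day.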
now = datetime.now().strftime('%d-%m-%Y')
file_name = ''.join([slugify(COMPANY_NAME), '.', now, '.xml'])
file_path = Path('out_xml') / file_name
save_root_xml(file_path, ad_dict_list)
if __name__ == '__main__':
main()
| from datetime import datetime, timedelta
from pathlib import Path
from pytils.translit import slugify
from appliances.data import (COMPANY_NAME, MANAGER_NAME, CONTACT_PHONE,
ADDRESS, CATEGORY, GOODS_TYPE, AD_TYPE,
get_description, CONDITION, PHOTO_STORAGE,
START_TIME)
from appliances.root_xml import save_root_xml
from utils import (get_list_of_dicts_from_csv_file, get_datetime,
get_repr_world_time)
def main():
ad_dict_list = []
products = get_list_of_dicts_from_csv_file('Товары.csv')
current_date = get_datetime(START_TIME)
    for product in products:
        if len(product['Заголовок']) < 20:
            title = f'Уплотнитель двери холодильника {product["Заголовок"]}'
        elif len(product['Заголовок']) < 26:
            title = f'Уплотнитель холодильника {product["Заголовок"]}'
        else:
            title = f'Уплотнитель для {product["Заголовок"]}'
        images = [''.join([PHOTO_STORAGE, x])
                  for x in product['Ссылки на картинки'].split(', ')]
ad_dict_list.append(
{
'Id': f'{datetime.now().strftime("%Y-%m")}-{slugify(title)}',
'DateBegin': get_repr_world_time(current_date),
'ListingFee': 'Package',
'AdStatus': 'Free',
'ManagerName': MANAGER_NAME,
'ContactPhone': CONTACT_PHONE,
'Address': ADDRESS,
'Category': CATEGORY,
'GoodsType': GOODS_TYPE,
'AdType': AD_TYPE,
'Title': title,
                'Description': get_description(product['Заголовок']),
                'Price': product['Цена'],
'Condition': CONDITION,
'Images': images,
}
)
        current_date += timedelta(minutes=45)
        if current_date.hour >= 20 and current_date.minute > 0:
            # advance by a full day so month boundaries are handled correctly
            current_date = (current_date + timedelta(days=1)).replace(hour=8, minute=0)
now = datetime.now().strftime('%d-%m-%Y')
file_name = ''.join([slugify(COMPANY_NAME), '.', now, '.xml'])
file_path = Path('out_xml') / file_name
save_root_xml(file_path, ad_dict_list)
if __name__ == '__main__':
main()
|
import os
import requests
import sqlite3,csv
import click
from flask import current_app
# from flask import cli (unnecessary?)
from flask.cli import with_appcontext
'''
BRUTE-FORCE mode
A single global connection is shared by the whole application, unlike the tutorial,
which opens a connection in g (so, if I understood correctly, one per request);
hence potential concurrency issues -> slowness and/or concurrent writes
'''
def get_db():
#current_app.logger.debug("Début d'appel à get_db()")
if current_app.config.get('db') is None:
current_app.config['db'] = sqlite3.connect(
current_app.DB_FILE,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False # pour éviter l'erreur "sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread" mais mystère sur la concurrence
)
current_app.config['db'].row_factory = sqlite3.Row
return current_app.config['db']
# def close_db(e=None):
# db = current_app.config.get('db')
# if db is not None:
# db.close()
# current_app.logger.debug("Leaving close_db()")
def init_db():
# remove the database file and all downloaded files
try:
os.remove(current_app.DB_FILE)
except OSError: pass
try:
for d in [current_app.staticDownloadPath,current_app.downloadsPath]:
files=os.listdir(d)
for f in files:
os.remove(d+'/'+f)
except OSError: pass
db = get_db()
with current_app.open_resource('db/schema.sql') as f:
db.executescript(f.read().decode('utf8'))
def insertDB(url,fileName,save=True):
db=get_db()
db.execute(
"insert into downloads(url,fileName) "
"values(?,?) ",(url,fileName)
)
db.commit()
if save:
saveDB()
def deleteDB(fileName,save=True):
db=get_db()
db.execute(
"delete from downloads "
"where fileName=? ",(fileName,)
)
db.commit()
if save:
saveDB()
colonnesSauvegardees=['url','fileName']
urlSauvegardeHook='/save-downloads.php'
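# saveDB: export the downloads table as CSV (columns in colonnesSauvegardees) to the instance folder, then POST it to the external backup URL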
def saveDB():
db=get_db()
lignes = db.execute(
'SELECT * '
'FROM downloads '
'ORDER BY id'
).fetchall()
txt=''
for colonne in colonnesSauvegardees:
if colonne!=colonnesSauvegardees[0]:
txt+=','
txt+= f'"{colonne}"'
txt+='\n'
for ligne in lignes:
for colonne in colonnesSauvegardees:
if colonne!=colonnesSauvegardees[0]:
txt+=','
txt+= f'"{ligne[colonne]}"'
txt+='\n'
fileSaveDownloads=current_app.instance_path+'/'+current_app.fileSaveDownloads
with open(fileSaveDownloads,'w') as f:
f.write(txt)
# external backup of the fileSaveDownloads file
url=current_app.urlSaveDownloads+urlSauvegardeHook
content=bytes(txt,'utf8')
datas={'file':current_app.fileSaveDownloads,'content':content}
p=requests.post(url,datas)
def getFileDownlodsExternal():
url=current_app.urlSaveDownloads+'/'+current_app.fileSaveDownloads
res=requests.get(url).content.decode('utf8')
return res
def getCsvFileSaveDownloads(txt=None):
if txt is None: # read the text backup (txt) stored in the instance folder
try:
with open(current_app.instance_path+'/'+current_app.fileSaveDownloads) as f:
return list(csv.DictReader(f))
except OSError:
return None
else: # parse txt, normally produced by the external backup
return list(csv.DictReader(txt.splitlines()))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Supprime la base et les fichiers, puis crée la structure de la base"""
init_db()
click.echo('Files and database reset.')
# "register" des fonctions
def init_app(app):
# app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
app.cli.add_command(showDB)
app.cli.add_command(reloadDownloads)
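# render a result set as an ASCII table; column widths are derived from the headers and the data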
def getTableContentString(resultSet,colonnes):
widths = []
tavnit = '|'
separator = '+'
for entete in colonnes:
widths.append(len(entete))
for ligne in resultSet:
index=0
for colonne in colonnes:
widths[index]=max(widths[index],len(str(ligne[colonne])))
index+=1
for w in widths:
tavnit += " %-"+"%ss |" % (w,)
separator += '-'*w + '--+'
res=separator+'\n'
res+= tavnit % tuple(colonnes)+'\n'
res+= separator+'\n'
for ligne in resultSet:
tab=[]
for colonne in colonnes:
tab.append(ligne[colonne])
res+= tavnit % tuple(tab)+'\n'
res+= separator+'\n'
res+= f"{len(resultSet)} ligne{"" if len(resultSet)<=1 else "s"}"
return res
@click.command('show-db')
@with_appcontext
def showDB():
"""Affiche le contenu de la base"""
db=get_db()
tout = db.execute(
'SELECT * '
'FROM downloads'
).fetchall()
colonnes=['url','fileName']
click.echo(getTableContentString(tout,colonnes))
from ..download import telecharger
@click.command('reload-downloads')
@with_appcontext
def reloadDownloads():
'''
Checks that the files in "downloads" are consistent with the database,
and re-downloads the missing ones based on the external backup
'''
filesStatic=os.listdir(current_app.staticDownloadPath)
filesInstance=os.listdir(current_app.downloadsPath)
sauvegardeExterne=getCsvFileSaveDownloads(getFileDownlodsExternal())
db=get_db()
# scan static and remove broken links
for f in filesStatic:
if f not in filesInstance:
current_app.logger.info(f'Removing "{f}" from static')
os.remove(current_app.staticDownloadPath+'/'+f)
# scan instance and remove files that are not in static
for f in filesInstance:
if f not in filesStatic:
current_app.logger.info(f'Removing "{f}" from instance')
os.remove(current_app.downloadsPath+'/'+f)
# bring the database back in sync
filesInstance=os.listdir(current_app.downloadsPath)
tout = db.execute(
'SELECT * '
'FROM downloads '
).fetchall()
for ligne in tout:
if ligne['fileName'] not in filesStatic:
deleteDB(ligne['fileName'],save=False)
# read the external backup and re-download whatever is missing
for f in sauvegardeExterne:
cherche = db.execute(
'SELECT * '
'FROM downloads '
'WHERE fileName=?',(f['fileName'],)
).fetchall()
if len(cherche)!=1:
url=f['url']
current_app.logger.info(f'Downloading {url}')
error,fileName= telecharger(f['url'],gererSession=False)
if error is None:
insertDB(f['url'],f['fileName'])
| import os
import requests
import sqlite3,csv
import click
from flask import current_app
#from flask import cli (unused?)
from flask.cli import with_appcontext
'''
BRUTE-FORCE mode
I open one global connection for the whole application, unlike the tutorial, which puts the connection in g (so, if I understood correctly, one per request),
hence potential concurrency problems -> slowness and/or concurrent writes
'''
def get_db():
#current_app.logger.debug("Entering get_db()")
if current_app.config.get('db') is None:
current_app.config['db'] = sqlite3.connect(
current_app.DB_FILE,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False # avoids "sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread", though the concurrency implications remain unclear
)
current_app.config['db'].row_factory = sqlite3.Row
return current_app.config['db']
# def close_db(e=None):
# db = current_app.config.get('db')
# if db is not None:
# db.close()
# current_app.logger.debug("Leaving close_db()")
def init_db():
# remove the database file and all downloaded files
try:
os.remove(current_app.DB_FILE)
except OSError: pass
try:
for d in [current_app.staticDownloadPath,current_app.downloadsPath]:
files=os.listdir(d)
for f in files:
os.remove(d+'/'+f)
except OSError: pass
db = get_db()
with current_app.open_resource('db/schema.sql') as f:
db.executescript(f.read().decode('utf8'))
def insertDB(url,fileName,save=True):
db=get_db()
db.execute(
"insert into downloads(url,fileName) "
"values(?,?) ",(url,fileName)
)
db.commit()
if save:
saveDB()
def deleteDB(fileName,save=True):
db=get_db()
db.execute(
"delete from downloads "
"where fileName=? ",(fileName,)
)
db.commit()
if save:
saveDB()
colonnesSauvegardees=['url','fileName']
urlSauvegardeHook='/save-downloads.php'
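# saveDB: export the downloads table as CSV (columns in colonnesSauvegardees) to the instance folder, then POST it to the external backup URL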
def saveDB():
db=get_db()
lignes = db.execute(
'SELECT * '
'FROM downloads '
'ORDER BY id'
).fetchall()
txt=''
for colonne in colonnesSauvegardees:
if colonne!=colonnesSauvegardees[0]:
txt+=','
txt+= f'"{colonne}"'
txt+='\n'
for ligne in lignes:
for colonne in colonnesSauvegardees:
if colonne!=colonnesSauvegardees[0]:
txt+=','
txt+= f'"{ligne[colonne]}"'
txt+='\n'
fileSaveDownloads=current_app.instance_path+'/'+current_app.fileSaveDownloads
with open(fileSaveDownloads,'w') as f:
f.write(txt)
# external backup of the fileSaveDownloads file
url=current_app.urlSaveDownloads+urlSauvegardeHook
content=bytes(txt,'utf8')
datas={'file':current_app.fileSaveDownloads,'content':content}
p=requests.post(url,datas)
def getFileDownlodsExternal():
url=current_app.urlSaveDownloads+'/'+current_app.fileSaveDownloads
res=requests.get(url).content.decode('utf8')
return res
def getCsvFileSaveDownloads(txt=None):
if txt is None: # read the text backup (txt) stored in the instance folder
try:
with open(current_app.instance_path+'/'+current_app.fileSaveDownloads) as f:
return list(csv.DictReader(f))
except OSError:
return None
else: # parse txt, normally produced by the external backup
return list(csv.DictReader(txt.splitlines()))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Supprime la base et les fichiers, puis crée la structure de la base"""
init_db()
click.echo('Files and database reset.')
# "register" des fonctions
def init_app(app):
# app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
app.cli.add_command(showDB)
app.cli.add_command(reloadDownloads)
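# render a result set as an ASCII table; column widths are derived from the headers and the data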
def getTableContentString(resultSet,colonnes):
widths = []
tavnit = '|'
separator = '+'
for entete in colonnes:
widths.append(len(entete))
for ligne in resultSet:
index=0
for colonne in colonnes:
widths[index]=max(widths[index],len(str(ligne[colonne])))
index+=1
for w in widths:
tavnit += " %-"+"%ss |" % (w,)
separator += '-'*w + '--+'
res=separator+'\n'
res+= tavnit % tuple(colonnes)+'\n'
res+= separator+'\n'
for ligne in resultSet:
tab=[]
for colonne in colonnes:
tab.append(ligne[colonne])
res+= tavnit % tuple(tab)+'\n'
res+= separator+'\n'
res+= f"{len(resultSet)} ligne{'' if len(resultSet)<=1 else 's'}"
return res
@click.command('show-db')
@with_appcontext
def showDB():
"""Affiche le contenu de la base"""
db=get_db()
tout = db.execute(
'SELECT * '
'FROM downloads'
).fetchall()
colonnes=['url','fileName']
click.echo(getTableContentString(tout,colonnes))
from ..download import telecharger
@click.command('reload-downloads')
@with_appcontext
def reloadDownloads():
'''
Checks that the files in "downloads" are consistent with the database,
and re-downloads the missing ones based on the external backup
'''
filesStatic=os.listdir(current_app.staticDownloadPath)
filesInstance=os.listdir(current_app.downloadsPath)
sauvegardeExterne=getCsvFileSaveDownloads(getFileDownlodsExternal())
db=get_db()
# scan static and remove broken links
for f in filesStatic:
if f not in filesInstance:
current_app.logger.info(f'Removing "{f}" from static')
os.remove(current_app.staticDownloadPath+'/'+f)
# scan instance and remove files that are not in static
for f in filesInstance:
if f not in filesStatic:
current_app.logger.info(f'Removing "{f}" from instance')
os.remove(current_app.downloadsPath+'/'+f)
# bring the database back in sync
filesInstance=os.listdir(current_app.downloadsPath)
tout = db.execute(
'SELECT * '
'FROM downloads '
).fetchall()
for ligne in tout:
if ligne['fileName'] not in filesStatic:
deleteDB(ligne['fileName'],save=False)
# read the external backup and re-download whatever is missing
for f in sauvegardeExterne:
cherche = db.execute(
'SELECT * '
'FROM downloads '
'WHERE fileName=?',(f['fileName'],)
).fetchall()
if len(cherche)!=1:
url=f['url']
current_app.logger.info(f'Downloading {url}')
error,fileName= telecharger(f['url'],gererSession=False)
if error is None:
insertDB(f['url'],f['fileName'])
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from pathlib import Path
from typing import Any, Dict, cast
from unittest.case import TestCase
from lisa import secret, variable
from lisa.util import LisaException, constants
from lisa.util.logger import get_logger
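# Expected masked values below follow lisa's secret masking: "guid" keeps the head and tail blocks of a GUID, "headtail" keeps only the first and last character.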
class VariableTestCase(TestCase):
def setUp(self) -> None:
secret.reset()
def test_in_env(self) -> None:
os.environ["LISA_normal_value"] = "value_from_env"
os.environ["S_LISA_normal_entry"] = "s_value_from_env"
variables = self._get_default_variables()
variables.update(variable._load_from_env())
data = self._replace_and_validate(variables, {"normal_entry": "******"})
self.assertEqual("value_from_env", data["nested"]["normal_value"])
self.assertEqual("s_value_from_env", data["normal_entry"])
def test_in_pair(self) -> None:
pair1 = "normal_value:nv_from_pair"
pair2 = "S:normal_entry:s_value_from_env"
variables = self._get_default_variables()
variables.update(variable.add_secrets_from_pairs([pair1, pair2]))
data = self._replace_and_validate(variables, {"normal_entry": "******"})
self.assertEqual("nv_from_pair", data["nested"]["normal_value"])
self.assertEqual("s_value_from_env", data["normal_entry"])
def test_in_normal_file_outside_secret(self) -> None:
self._test_files(
"variable_normal.yml",
True,
{
"normal_value": "******",
"normal_entry": "******",
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
)
def test_in_normal_file(self) -> None:
self._test_files(
"variable_normal.yml",
False,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
)
def test_in_secret_file_outside_secret(self) -> None:
self._test_files(
"variable_secret.yml",
True,
{
"normal_value": "******",
"normal_entry": "******",
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
)
def test_in_secret_file(self) -> None:
self._test_files(
"variable_secret.yml",
False,
{},
)
def test_in_runbook_format_file(self) -> None:
runbook_data: Dict[str, Any] = {"variable": [{"file": "variable_normal.yml"}]}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_variable_path_with_variable(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [
{"file": "variable_$(var_in_var1).yml"},
{"name": "var_in_var1", "value": "$(var_in_var2)"},
{"name": "var_in_var2", "value": "normal"},
]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_runbook_path_with_variable(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [{"file": "variable_$(var_in_cmd).yml"}]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{"var_in_cmd": variable.VariableEntry("normal", False)},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_runbook_format_variable(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [
{"name": "normal_value", "value": "normal_value"},
{"name": "normal_entry", "value": {"value": "entry_value"}},
{
"name": "secret_guid",
"value": {
"value": "12345678-abcd-efab-cdef-1234567890ab",
"is_secret": True,
"mask": "guid",
},
},
{
"name": "secret_int",
"value": {
"value": 1234567890,
"is_secret": True,
"mask": "headtail",
},
},
{
"name": "secret_head_tail",
"value": {
"value": "abcdefgh",
"is_secret": True,
"mask": "headtail",
},
},
]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_runbook_ordered(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [
{"file": "variable_normal.yml"},
{"name": "normal_value", "value": "normal_value1"},
{"name": "normal_entry", "value": {"value": "entry_value1"}},
{
"name": "secret_guid",
"value": {
"value": "12345678-abcd-efab-cdef-1234567890ac",
"is_secret": True,
"mask": "guid",
},
},
{
"name": "secret_int",
"value": {
"value": 1234567891,
"is_secret": True,
"mask": "headtail",
},
},
{
"name": "secret_head_tail",
"value": {
"value": "abcdefgi",
"is_secret": True,
"mask": "headtail",
},
},
]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ac",
"secret_int": "1****1",
"secret_head_tail": "a****i",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ac", data["list"][0])
self.assertEqual(1234567891, data["list"][1]["dictInList"])
self.assertEqual("abcdefgi", data["headtail"])
self.assertEqual("normal_value1", data["nested"]["normal_value"])
self.assertEqual("entry_value1", data["normal_entry"])
def test_variable_not_found(self) -> None:
variables = self._get_default_variables()
with self.assertRaises(LisaException) as cm:
variable.replace_variables({"item": "$(notexists)"}, variables)
self.assertIsInstance(cm.exception, LisaException)
self.assertIn("cannot find variable", str(cm.exception))
def test_variable_not_used(self) -> None:
variables = self._get_default_variables()
variables["unused"] = variable.VariableEntry("value")
self.assertFalse(variables["unused"].is_used)
self.assertFalse(variables["normal_value"].is_used)
self._replace_and_validate(variables, {"normal_entry": "original"})
self.assertFalse(variables["unused"].is_used)
self.assertTrue(variables["normal_value"].is_used)
def test_invalid_file_extension(self) -> None:
variables = self._get_default_variables()
with self.assertRaises(LisaException) as cm:
variables.update(variable._load_from_file("file.xml"))
self.assertIsInstance(cm.exception, LisaException)
self.assertIn("variable support only yaml and yml", str(cm.exception))
def _test_runbook_file_entry(
self,
data: Any,
secret_variables: Dict[str, str],
current_variables: Dict[str, variable.VariableEntry],
) -> Any:
constants.RUNBOOK_PATH = Path(__file__).parent
variables = self._get_default_variables()
variables.update(variable._load_from_runbook(data, current_variables))
data = self._replace_and_validate(variables, secret_variables)
return data
def _test_files(
self, file_name: str, all_secret: bool, secret_variables: Dict[str, str]
) -> Any:
constants.RUNBOOK_PATH = Path(__file__).parent
variables = self._get_default_variables()
variables.update(variable._load_from_file(file_name, is_secret=all_secret))
data = self._replace_and_validate(variables, secret_variables)
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
return data
def _verify_secret(
self, variables: Dict[str, variable.VariableEntry], secrets: Dict[str, str]
) -> None:
log = get_logger()
copied_variables = dict(variables)
for secret_name, expected_value in secrets.items():
secret_name = secret_name.lower()
value = copied_variables[secret_name].data
del copied_variables[secret_name]
with self.assertLogs("lisa") as cm:
log.info(f"MUST_SECRET[{value}]")
self.assertListEqual(
[f"INFO:lisa:MUST_SECRET[{expected_value}]"],
cm.output,
f"key: {secret_name}, value: {value}, "
f"expected: {expected_value} should be secret",
)
for key, unsecured_value in copied_variables.items():
with self.assertLogs("lisa") as cm:
log.info(f"MUST_NOT_SECRET[{unsecured_value}]")
self.assertListEqual(
[f"INFO:lisa:MUST_NOT_SECRET[{unsecured_value}]"],
cm.output,
f"key: {key}, value: {unsecured_value} shouldn't be secret",
)
def _get_default_variables(self) -> Dict[str, variable.VariableEntry]:
data = {
"normal_value": variable.VariableEntry("original"),
"normal_entry": variable.VariableEntry("original"),
"secret_guid": variable.VariableEntry("original"),
"secret_int": variable.VariableEntry("original"),
"secret_head_tail": variable.VariableEntry("original"),
}
return data
def _replace_and_validate(
self, variables: Dict[str, variable.VariableEntry], secrets: Dict[str, str]
) -> Dict[str, Any]:
data = variable.replace_variables(self._get_default_data(), variables=variables)
assert isinstance(data, dict), f"actual: {type(data)}"
self.assertDictEqual(
{
"keep": "normal",
"normal_entry": variables["normal_entry"].data,
"headtail": variables["secret_head_tail"].data,
"nested": {"normal_value": variables["normal_value"].data},
"list": [
variables["secret_guid"].data,
{"dictInList": variables["secret_int"].data},
],
"two_entries": f"1{variables["normal_entry"].data}"
f"2-$-()3{variables["normal_entry"].data}4",
},
data,
)
self._verify_secret(variables, secrets=secrets)
data = cast(Dict[str, Any], data)
return data
def _get_default_data(self) -> Dict[str, Any]:
data = {
"keep": "normal",
"normal_entry": "$(normal_entry)",
"headtail": "$(secret_head_tail)",
"nested": {"normal_value": "$(normal_value)"},
"list": ["$(secret_guid)", {"dictInList": "$(secret_int)"}],
"two_entries": "1$(normal_entry)2-$-()3$(normal_entry)4",
}
return data
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from pathlib import Path
from typing import Any, Dict, cast
from unittest.case import TestCase
from lisa import secret, variable
from lisa.util import LisaException, constants
from lisa.util.logger import get_logger
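# Expected masked values below follow lisa's secret masking: "guid" keeps the head and tail blocks of a GUID, "headtail" keeps only the first and last character.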
class VariableTestCase(TestCase):
def setUp(self) -> None:
secret.reset()
def test_in_env(self) -> None:
os.environ["LISA_normal_value"] = "value_from_env"
os.environ["S_LISA_normal_entry"] = "s_value_from_env"
variables = self._get_default_variables()
variables.update(variable._load_from_env())
data = self._replace_and_validate(variables, {"normal_entry": "******"})
self.assertEqual("value_from_env", data["nested"]["normal_value"])
self.assertEqual("s_value_from_env", data["normal_entry"])
def test_in_pair(self) -> None:
pair1 = "normal_value:nv_from_pair"
pair2 = "S:normal_entry:s_value_from_env"
variables = self._get_default_variables()
variables.update(variable.add_secrets_from_pairs([pair1, pair2]))
data = self._replace_and_validate(variables, {"normal_entry": "******"})
self.assertEqual("nv_from_pair", data["nested"]["normal_value"])
self.assertEqual("s_value_from_env", data["normal_entry"])
def test_in_normal_file_outside_secret(self) -> None:
self._test_files(
"variable_normal.yml",
True,
{
"normal_value": "******",
"normal_entry": "******",
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
)
def test_in_normal_file(self) -> None:
self._test_files(
"variable_normal.yml",
False,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
)
def test_in_secret_file_outside_secret(self) -> None:
self._test_files(
"variable_secret.yml",
True,
{
"normal_value": "******",
"normal_entry": "******",
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
)
def test_in_secret_file(self) -> None:
self._test_files(
"variable_secret.yml",
False,
{},
)
def test_in_runbook_format_file(self) -> None:
runbook_data: Dict[str, Any] = {"variable": [{"file": "variable_normal.yml"}]}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_variable_path_with_variable(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [
{"file": "variable_$(var_in_var1).yml"},
{"name": "var_in_var1", "value": "$(var_in_var2)"},
{"name": "var_in_var2", "value": "normal"},
]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_runbook_path_with_variable(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [{"file": "variable_$(var_in_cmd).yml"}]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{"var_in_cmd": variable.VariableEntry("normal", False)},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_runbook_format_variable(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [
{"name": "normal_value", "value": "normal_value"},
{"name": "normal_entry", "value": {"value": "entry_value"}},
{
"name": "secret_guid",
"value": {
"value": "12345678-abcd-efab-cdef-1234567890ab",
"is_secret": True,
"mask": "guid",
},
},
{
"name": "secret_int",
"value": {
"value": 1234567890,
"is_secret": True,
"mask": "headtail",
},
},
{
"name": "secret_head_tail",
"value": {
"value": "abcdefgh",
"is_secret": True,
"mask": "headtail",
},
},
]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ab",
"secret_int": "1****0",
"secret_head_tail": "a****h",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
def test_in_runbook_ordered(self) -> None:
runbook_data: Dict[str, Any] = {
"variable": [
{"file": "variable_normal.yml"},
{"name": "normal_value", "value": "normal_value1"},
{"name": "normal_entry", "value": {"value": "entry_value1"}},
{
"name": "secret_guid",
"value": {
"value": "12345678-abcd-efab-cdef-1234567890ac",
"is_secret": True,
"mask": "guid",
},
},
{
"name": "secret_int",
"value": {
"value": 1234567891,
"is_secret": True,
"mask": "headtail",
},
},
{
"name": "secret_head_tail",
"value": {
"value": "abcdefgi",
"is_secret": True,
"mask": "headtail",
},
},
]
}
data = self._test_runbook_file_entry(
runbook_data,
{
"secret_guid": "12345678-****-****-****-********90ac",
"secret_int": "1****1",
"secret_head_tail": "a****i",
},
{},
)
self.assertEqual("12345678-abcd-efab-cdef-1234567890ac", data["list"][0])
self.assertEqual(1234567891, data["list"][1]["dictInList"])
self.assertEqual("abcdefgi", data["headtail"])
self.assertEqual("normal_value1", data["nested"]["normal_value"])
self.assertEqual("entry_value1", data["normal_entry"])
def test_variable_not_found(self) -> None:
variables = self._get_default_variables()
with self.assertRaises(LisaException) as cm:
variable.replace_variables({"item": "$(notexists)"}, variables)
self.assertIsInstance(cm.exception, LisaException)
self.assertIn("cannot find variable", str(cm.exception))
def test_variable_not_used(self) -> None:
variables = self._get_default_variables()
variables["unused"] = variable.VariableEntry("value")
self.assertFalse(variables["unused"].is_used)
self.assertFalse(variables["normal_value"].is_used)
self._replace_and_validate(variables, {"normal_entry": "original"})
self.assertFalse(variables["unused"].is_used)
self.assertTrue(variables["normal_value"].is_used)
def test_invalid_file_extension(self) -> None:
variables = self._get_default_variables()
with self.assertRaises(LisaException) as cm:
variables.update(variable._load_from_file("file.xml"))
self.assertIsInstance(cm.exception, LisaException)
self.assertIn("variable support only yaml and yml", str(cm.exception))
def _test_runbook_file_entry(
self,
data: Any,
secret_variables: Dict[str, str],
current_variables: Dict[str, variable.VariableEntry],
) -> Any:
constants.RUNBOOK_PATH = Path(__file__).parent
variables = self._get_default_variables()
variables.update(variable._load_from_runbook(data, current_variables))
data = self._replace_and_validate(variables, secret_variables)
return data
def _test_files(
self, file_name: str, all_secret: bool, secret_variables: Dict[str, str]
) -> Any:
constants.RUNBOOK_PATH = Path(__file__).parent
variables = self._get_default_variables()
variables.update(variable._load_from_file(file_name, is_secret=all_secret))
data = self._replace_and_validate(variables, secret_variables)
self.assertEqual("normal_value", data["nested"]["normal_value"])
self.assertEqual("entry_value", data["normal_entry"])
self.assertEqual("12345678-abcd-efab-cdef-1234567890ab", data["list"][0])
self.assertEqual(1234567890, data["list"][1]["dictInList"])
self.assertEqual("abcdefgh", data["headtail"])
return data
def _verify_secret(
self, variables: Dict[str, variable.VariableEntry], secrets: Dict[str, str]
) -> None:
log = get_logger()
copied_variables = dict(variables)
for secret_name, expected_value in secrets.items():
secret_name = secret_name.lower()
value = copied_variables[secret_name].data
del copied_variables[secret_name]
with self.assertLogs("lisa") as cm:
log.info(f"MUST_SECRET[{value}]")
self.assertListEqual(
[f"INFO:lisa:MUST_SECRET[{expected_value}]"],
cm.output,
f"key: {secret_name}, value: {value}, "
f"expected: {expected_value} should be secret",
)
for key, unsecured_value in copied_variables.items():
with self.assertLogs("lisa") as cm:
log.info(f"MUST_NOT_SECRET[{unsecured_value}]")
self.assertListEqual(
[f"INFO:lisa:MUST_NOT_SECRET[{unsecured_value}]"],
cm.output,
f"key: {key}, value: {unsecured_value} shouldn't be secret",
)
def _get_default_variables(self) -> Dict[str, variable.VariableEntry]:
data = {
"normal_value": variable.VariableEntry("original"),
"normal_entry": variable.VariableEntry("original"),
"secret_guid": variable.VariableEntry("original"),
"secret_int": variable.VariableEntry("original"),
"secret_head_tail": variable.VariableEntry("original"),
}
return data
def _replace_and_validate(
self, variables: Dict[str, variable.VariableEntry], secrets: Dict[str, str]
) -> Dict[str, Any]:
data = variable.replace_variables(self._get_default_data(), variables=variables)
assert isinstance(data, dict), f"actual: {type(data)}"
self.assertDictEqual(
{
"keep": "normal",
"normal_entry": variables["normal_entry"].data,
"headtail": variables["secret_head_tail"].data,
"nested": {"normal_value": variables["normal_value"].data},
"list": [
variables["secret_guid"].data,
{"dictInList": variables["secret_int"].data},
],
"two_entries": f"1{variables['normal_entry'].data}"
f"2-$-()3{variables['normal_entry'].data}4",
},
data,
)
self._verify_secret(variables, secrets=secrets)
data = cast(Dict[str, Any], data)
return data
def _get_default_data(self) -> Dict[str, Any]:
data = {
"keep": "normal",
"normal_entry": "$(normal_entry)",
"headtail": "$(secret_head_tail)",
"nested": {"normal_value": "$(normal_value)"},
"list": ["$(secret_guid)", {"dictInList": "$(secret_int)"}],
"two_entries": "1$(normal_entry)2-$-()3$(normal_entry)4",
}
return data
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-05-08 20:51
import functools
import os
from typing import Union, Any, List
import torch
from alnlp.modules.util import lengths_to_mask
from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from elit.common.constant import UNK, IDX
from elit.common.dataset import PadSequenceDataLoader
from elit.common.structure import History
from elit.common.torch_component import TorchComponent
from elit.common.transform import LowerCase, FieldLength, PunctuationMask, TransformList
from elit.common.vocab import Vocab, VocabCounter
from elit.common.conll import CoNLLWord, CoNLLSentence
from elit.components.parsers.constituency.treecrf import CRF2oDependency
from elit.components.parsers.second_order.model import DependencyModel
from elit.components.parsers.second_order.treecrf_decoder import TreeCRFDecoder
from elit.datasets.parsing.conll_dataset import CoNLLParsingDataset, append_bos, get_sibs
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding, ContextualWordEmbeddingModule
from elit.layers.embeddings.embedding import Embedding, EmbeddingList, ConcatModuleList
from elit.layers.embeddings.util import index_word2vec_with_vocab
from elit.layers.transformers.pt_imports import AutoModel_
from elit.layers.transformers.utils import build_optimizer_scheduler_with_transformer
from elit.metrics.parsing.attachmentscore import AttachmentScore
from elit.transform.transformer_tokenizer import TransformerSequenceTokenizer
from elit.utils.time_util import CountdownTimer
from elit.common.util import merge_locals_kwargs, merge_dict, reorder
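# Second-order TreeCRF dependency parser: scores arcs, siblings and relations (CRF2oDependency); decoding can enforce projective trees and use MBR.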
class TreeConditionalRandomFieldDependencyParser(TorchComponent):
def __init__(self) -> None:
super().__init__()
self.model: DependencyModel = self.model
self._transformer_transform = None
def predict(self, data: Any, batch_size=None, batch_max_tokens=None, output_format='conllx', **kwargs):
if not data:
return []
use_pos = self.use_pos
flat = self.input_is_flat(data, use_pos)
if flat:
data = [data]
samples = self.build_samples(data, use_pos)
if not batch_max_tokens:
batch_max_tokens = self.config.batch_max_tokens
if not batch_size:
batch_size = self.config.batch_size
dataloader = self.build_dataloader(samples,
device=self.devices[0], shuffle=False,
**merge_dict(self.config,
batch_size=batch_size,
batch_max_tokens=batch_max_tokens,
overwrite=True,
**kwargs))
predictions, build_data, data, order = self.before_outputs(data)
for batch in dataloader:
arc_scores, rel_scores, mask, puncts = self.feed_batch(batch)
self.collect_outputs(arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,
build_data)
outputs = self.post_outputs(predictions, data, order, use_pos, build_data)
if flat:
return outputs[0]
return outputs
def build_samples(self, data, use_pos=None):
samples = []
for idx, each in enumerate(data):
sample = {IDX: idx}
if use_pos:
token, pos = zip(*each)
sample.update({'FORM': list(token), 'CPOS': list(pos)})
else:
token = each
sample.update({'FORM': list(token)})
samples.append(sample)
return samples
def input_is_flat(self, data, use_pos=None):
if use_pos:
flat = isinstance(data[0], (list, tuple)) and isinstance(data[0][0], str)
else:
flat = isinstance(data[0], str)
return flat
def before_outputs(self, data):
predictions, order = [], []
build_data = data is None
if build_data:
data = []
return predictions, build_data, data, order
def post_outputs(self, predictions, data, order, use_pos, build_data):
predictions = reorder(predictions, order)
if build_data:
data = reorder(data, order)
outputs = []
self.predictions_to_human(predictions, outputs, data, use_pos)
return outputs
def predictions_to_human(self, predictions, outputs, data, use_pos):
for d, (arcs, rels) in zip(data, predictions):
sent = CoNLLSentence()
for idx, (cell, a, r) in enumerate(zip(d, arcs, rels)):
if use_pos:
token, pos = cell
else:
token, pos = cell, None
sent.append(CoNLLWord(idx + 1, token, cpos=pos, head=a, deprel=self.vocabs['rel'][r]))
outputs.append(sent)
def collect_outputs(self, arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,
build_data):
lens = [len(token) - 1 for token in batch['token']]
arc_preds, rel_preds = self.decode(arc_scores, rel_scores, mask, batch)
self.collect_outputs_extend(predictions, arc_preds, rel_preds, lens, mask)
order.extend(batch[IDX])
if build_data:
if use_pos:
data.extend(zip(batch['FORM'], batch['CPOS']))
else:
data.extend(batch['FORM'])
def collect_outputs_extend(self, predictions: list, arc_preds, rel_preds, lens, mask):
predictions.extend(zip([seq.tolist() for seq in arc_preds[mask].split(lens)],
[seq.tolist() for seq in rel_preds[mask].split(lens)]))
def fit(self,
trn_data,
dev_data,
save_dir,
embed,
n_mlp_arc=500,
n_mlp_rel=100,
n_mlp_sib=100,
mlp_dropout=.33,
lr=2e-3,
transformer_lr=5e-5,
mu=.9,
nu=.9,
epsilon=1e-12,
grad_norm=5.0,
decay=.75,
decay_steps=5000,
weight_decay=0,
warmup_steps=0.1,
separate_optimizer=True,
patience=100,
lowercase=False,
epochs=50000,
tree=False,
proj=True,
mbr=True,
partial=False,
punct=False,
min_freq=2,
logger=None,
verbose=True,
unk=UNK,
max_sequence_length=512,
batch_size=None,
sampler_builder=None,
gradient_accumulation=1,
devices: Union[float, int, List[int]] = None,
transform=None,
eval_trn=False,
bos='\0',
**kwargs):
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def execute_training_loop(self, trn, dev, devices, epochs, logger, patience, save_dir, optimizer,
gradient_accumulation, **kwargs):
optimizer, scheduler, transformer_optimizer, transformer_scheduler = optimizer
criterion = self.build_criterion()
best_e, best_metric = 0, self.build_metric()
timer = CountdownTimer(epochs)
history = History()
ratio_width = len(f'{len(trn) // gradient_accumulation}/{len(trn) // gradient_accumulation}')
for epoch in range(1, epochs + 1):
# train one epoch and update the parameters
logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
self.fit_dataloader(trn, optimizer, scheduler, criterion, epoch, logger, history,
transformer_optimizer, transformer_scheduler,
gradient_accumulation=gradient_accumulation, eval_trn=self.config.eval_trn)
loss, dev_metric = self.evaluate_dataloader(dev, criterion, ratio_width=ratio_width, logger=logger)
timer.update()
# logger.info(f"{"Dev" + " " * ratio_width} loss: {loss:.4f} {dev_metric}")
# save the model if it is the best so far
report = f"{timer.elapsed_human} / {timer.total_time_human} ETA: {timer.eta_human}"
if dev_metric > best_metric:
best_e, best_metric = epoch, dev_metric
self.save_weights(save_dir)
report += ' ([red]saved[/red])'
else:
if patience != epochs:
report += f' ({epoch - best_e}/{patience})'
else:
report += f' ({epoch - best_e})'
logger.info(report)
if patience is not None and epoch - best_e >= patience:
logger.info(f'LAS has stopped improving for {patience} epochs, early stop.')
break
timer.stop()
if not best_e:
self.save_weights(save_dir)
elif best_e != epoch:
self.load_weights(save_dir)
logger.info(f"Max score of dev is {best_metric.score:.2%} at epoch {best_e}")
logger.info(f"Average time of each epoch is {timer.elapsed_average_human}")
logger.info(f"{timer.elapsed_human} elapsed")
def build_optimizer(self, epochs, trn, gradient_accumulation, **kwargs):
config = self.config
model = self.model
if isinstance(model, nn.DataParallel):
model = model.module
transformer = self._get_transformer_builder()
if transformer and transformer.trainable:
transformer = self._get_transformer()
optimizer = Adam(set(model.parameters()) - set(transformer.parameters()),
config.lr,
(config.mu, config.nu),
config.epsilon)
if self.config.transformer_lr:
num_training_steps = len(trn) * epochs // gradient_accumulation
if not self.config.separate_optimizer:
optimizer, scheduler = build_optimizer_scheduler_with_transformer(model,
transformer,
config.lr,
config.transformer_lr,
num_training_steps,
config.warmup_steps,
config.weight_decay,
config.epsilon)
transformer_optimizer, transformer_scheduler = None, None
else:
transformer_optimizer, transformer_scheduler = \
build_optimizer_scheduler_with_transformer(transformer,
transformer,
config.lr,
config.transformer_lr,
num_training_steps,
config.warmup_steps,
config.weight_decay,
config.epsilon)
else:
transformer.requires_grad_(False)
transformer_optimizer, transformer_scheduler = None, None
else:
optimizer = Adam(model.parameters(),
config.lr,
(config.mu, config.nu),
config.epsilon)
transformer_optimizer, transformer_scheduler = None, None
if self.config.separate_optimizer:
scheduler = ExponentialLR(optimizer, config.decay ** (1 / config.decay_steps))
# noinspection PyUnboundLocalVariable
optimizer = Adam(model.parameters(), **{'lr': 0.002, 'betas': (0.9, 0.9), 'eps': 1e-12})
scheduler = ExponentialLR(optimizer, **{'gamma': 0.9999424652406974})
return optimizer, scheduler, transformer_optimizer, transformer_scheduler
# noinspection PyMethodOverriding
def build_dataloader(self,
data,
shuffle,
device,
embed: Embedding,
training=False,
logger=None,
gradient_accumulation=1,
sampler_builder=None,
batch_size=None,
bos='\0',
**kwargs) -> DataLoader:
first_transform = TransformList(functools.partial(append_bos, bos=bos))
embed_transform = embed.transform(vocabs=self.vocabs)
transformer_transform = self._get_transformer_transform_from_transforms(embed_transform)
if embed_transform:
if transformer_transform and isinstance(embed_transform, TransformList):
embed_transform.remove(transformer_transform)
first_transform.append(embed_transform)
dataset = self.build_dataset(data, first_transform=first_transform)
if self.config.get('transform', None):
dataset.append_transform(self.config.transform)
if self.vocabs.mutable:
self.build_vocabs(dataset, logger, self._transformer_trainable())
if transformer_transform and isinstance(embed_transform, TransformList):
embed_transform.append(transformer_transform)
dataset.append_transform(FieldLength('token', 'sent_length'))
if isinstance(data, str):
dataset.purge_cache()
if len(dataset) > 1000 and isinstance(data, str):
timer = CountdownTimer(len(dataset))
self.cache_dataset(dataset, timer, training, logger)
if sampler_builder:
lens = [sample['sent_length'] for sample in dataset]
sampler = sampler_builder.build(lens, shuffle, gradient_accumulation)
else:
sampler = None
loader = PadSequenceDataLoader(dataset=dataset,
batch_sampler=sampler,
batch_size=batch_size,
pad=self.get_pad_dict(),
device=device,
vocabs=self.vocabs)
return loader
def cache_dataset(self, dataset, timer, training=False, logger=None):
for each in dataset:
timer.log('Preprocessing and caching samples [blink][yellow]...[/yellow][/blink]')
def get_pad_dict(self):
return {'arc': 0}
def build_dataset(self, data, first_transform=None):
if not first_transform:
first_transform = append_bos
transform = [first_transform, get_sibs]
if self.config.get('lowercase', False):
transform.append(LowerCase('token'))
transform.append(self.vocabs)
if not self.config.punct:
transform.append(PunctuationMask('token', 'punct_mask'))
return CoNLLParsingDataset(data, transform=transform)
def build_tokenizer_transform(self):
return TransformerSequenceTokenizer(self.transformer_tokenizer, 'token', '',
ret_token_span=True, cls_is_bos=True,
max_seq_length=self.config.get('max_sequence_length',
512),
truncate_long_sequences=False)
def build_vocabs(self, dataset, logger=None, transformer=False):
rel_vocab = self.vocabs.get('rel', None)
if rel_vocab is None:
rel_vocab = Vocab(unk_token=None, pad_token=self.config.get('pad_rel', None))
self.vocabs.put(rel=rel_vocab)
timer = CountdownTimer(len(dataset))
if transformer:
token_vocab = None
else:
self.vocabs.token = token_vocab = VocabCounter(unk_token=self.config.get('unk', UNK))
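# iterating the dataset runs its transforms (the vocabs are appended as a transform in build_dataset), which populates the vocabularies as a side effect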
for i, sample in enumerate(dataset):
timer.log('Building vocab [blink][yellow]...[/yellow][/blink]', ratio_percentage=True)
min_freq = self.config.get('min_freq', None)
if min_freq:
token_vocab.trim(min_freq)
rel_vocab.set_unk_as_safe_unk() # Some relation in dev set is OOV
self.vocabs.lock()
self.vocabs.summary(logger=logger)
if token_vocab:
self.config.n_words = len(self.vocabs['token'])
self.config.n_rels = len(self.vocabs['rel'])
if token_vocab:
self.config.pad_index = self.vocabs['token'].pad_idx
self.config.unk_index = self.vocabs['token'].unk_idx
# noinspection PyMethodOverriding
def build_model(self, embed: Embedding, encoder, n_mlp_arc, n_mlp_rel, mlp_dropout, n_mlp_sib, training=True,
**kwargs) -> torch.nn.Module:
model = DependencyModel(
embed=embed.module(vocabs=self.vocabs),
encoder=encoder,
decoder=TreeCRFDecoder(encoder.get_output_dim(), n_mlp_arc, n_mlp_sib, n_mlp_rel, mlp_dropout,
len(self.vocabs['rel']))
)
return model
def build_embeddings(self, training=True):
pretrained_embed = None
if self.config.get('pretrained_embed', None):
pretrained_embed = index_word2vec_with_vocab(self.config.pretrained_embed, self.vocabs['token'],
init='zeros', normalize=True)
transformer = self.config.transformer
if transformer:
transformer = AutoModel_.from_pretrained(transformer, training=training)
return pretrained_embed, transformer
# noinspection PyMethodOverriding
def fit_dataloader(self,
trn,
optimizer,
scheduler,
criterion,
epoch,
logger,
history: History,
transformer_optimizer=None,
transformer_scheduler=None,
gradient_accumulation=1,
eval_trn=False,
**kwargs):
self.model.train()
timer = CountdownTimer(history.num_training_steps(len(trn), gradient_accumulation))
metric = self.build_metric(training=True)
total_loss = 0
for idx, batch in enumerate(trn):
optimizer.zero_grad()
(s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)
arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']
loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)
if gradient_accumulation > 1:
loss /= gradient_accumulation
loss.backward()
total_loss += loss.item()
if eval_trn:
arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)
self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)
if history.step(gradient_accumulation):
self._step(optimizer, scheduler, transformer_optimizer, transformer_scheduler)
report = self._report(total_loss / (timer.current + 1), metric if eval_trn else None)
lr = scheduler.get_last_lr()[0]
report += f' lr: {lr:.4e}'
timer.log(report, ratio_percentage=False, logger=logger)
del loss
def _step(self, optimizer, scheduler, transformer_optimizer, transformer_scheduler):
if self.config.get('grad_norm', None):
nn.utils.clip_grad_norm_(self.model.parameters(),
self.config.grad_norm)
optimizer.step()
scheduler.step()
if self._transformer_transform and self.config.transformer_lr and transformer_optimizer:
transformer_optimizer.step()
transformer_optimizer.zero_grad()
transformer_scheduler.step()
def feed_batch(self, batch):
words, feats, lens, puncts = batch.get('token_id', None), batch.get('pos_id', None), batch['sent_length'], \
batch.get('punct_mask', None)
mask = lengths_to_mask(lens)
logits = self.model(batch, mask)
if self.model.training:
mask = mask.clone()
# ignore the first token of each sentence
mask[:, 0] = 0
return logits, mask, puncts
def _report(self, loss, metric: AttachmentScore = None):
return f'loss: {loss:.4f} {metric}' if metric else f'loss: {loss:.4f}'
def compute_loss(self, s_arc, s_sib, s_rel, arcs, sibs, rels, mask):
crf: CRF2oDependency = self.model.decoder.crf
return crf.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.config.mbr, self.config.partial)
# noinspection PyUnboundLocalVariable
@torch.no_grad()
def evaluate_dataloader(self, loader: PadSequenceDataLoader, criterion, logger=None, filename=None, output=False,
ratio_width=None,
metric=None,
**kwargs):
self.model.eval()
total_loss = 0
if not metric:
metric = self.build_metric()
timer = CountdownTimer(len(loader))
for batch in loader:
(s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)
arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']
loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)
total_loss += float(loss)
arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)
self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)
report = self._report(total_loss / (timer.current + 1), metric)
if filename:
report = f'{os.path.basename(filename)} ' + report
timer.log(report, ratio_percentage=False, logger=logger, ratio_width=ratio_width)
total_loss /= len(loader)
return total_loss, metric
def update_metric(self, arc_preds, rel_preds, arcs, rels, mask, puncts, metric):
# ignore all punctuation if not specified
if not self.config.punct:
mask &= puncts
metric(arc_preds, rel_preds, arcs, rels, mask)
def decode(self, s_arc, s_sib, s_rel, mask):
crf: CRF2oDependency = self.model.decoder.crf
return crf.decode(s_arc, s_sib, s_rel, mask, self.config.tree and not self.model.training, self.config.mbr,
self.config.proj)
def build_criterion(self, **kwargs):
return None
def build_metric(self, **kwargs):
return AttachmentScore()
def _get_transformer_transform_from_transforms(self, transform: Union[
TransformList, TransformerSequenceTokenizer]) -> TransformerSequenceTokenizer:
def _get():
if isinstance(transform, TransformerSequenceTokenizer):
# noinspection PyTypeChecker
return transform
elif isinstance(transform, TransformList):
# noinspection PyTypeChecker,PyArgumentList
for each in transform:
if isinstance(each, TransformerSequenceTokenizer):
return each
if self._transformer_transform is None:
self._transformer_transform = _get()
return self._transformer_transform
def _get_transformer(self):
embed = self.model.embed
if isinstance(embed, ContextualWordEmbeddingModule):
return embed
if isinstance(embed, ConcatModuleList):
for each in embed:
if isinstance(each, ContextualWordEmbeddingModule):
return each
def _get_transformer_builder(self):
embed: Embedding = self.config.embed
if isinstance(embed, ContextualWordEmbedding):
return embed
if isinstance(embed, EmbeddingList):
for each in embed.to_list():
if isinstance(each, ContextualWordEmbedding):
return each
def _transformer_trainable(self):
builder = self._get_transformer_builder()
if not builder:
return False
return builder.trainable
| # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-05-08 20:51
import functools
import os
from typing import Union, Any, List
import torch
from alnlp.modules.util import lengths_to_mask
from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from elit.common.constant import UNK, IDX
from elit.common.dataset import PadSequenceDataLoader
from elit.common.structure import History
from elit.common.torch_component import TorchComponent
from elit.common.transform import LowerCase, FieldLength, PunctuationMask, TransformList
from elit.common.vocab import Vocab, VocabCounter
from elit.common.conll import CoNLLWord, CoNLLSentence
from elit.components.parsers.constituency.treecrf import CRF2oDependency
from elit.components.parsers.second_order.model import DependencyModel
from elit.components.parsers.second_order.treecrf_decoder import TreeCRFDecoder
from elit.datasets.parsing.conll_dataset import CoNLLParsingDataset, append_bos, get_sibs
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding, ContextualWordEmbeddingModule
from elit.layers.embeddings.embedding import Embedding, EmbeddingList, ConcatModuleList
from elit.layers.embeddings.util import index_word2vec_with_vocab
from elit.layers.transformers.pt_imports import AutoModel_
from elit.layers.transformers.utils import build_optimizer_scheduler_with_transformer
from elit.metrics.parsing.attachmentscore import AttachmentScore
from elit.transform.transformer_tokenizer import TransformerSequenceTokenizer
from elit.utils.time_util import CountdownTimer
from elit.common.util import merge_locals_kwargs, merge_dict, reorder
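# Second-order TreeCRF dependency parser: scores arcs, siblings and relations (CRF2oDependency); decoding can enforce projective trees and use MBR.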
class TreeConditionalRandomFieldDependencyParser(TorchComponent):
def __init__(self) -> None:
super().__init__()
self.model: DependencyModel = self.model
self._transformer_transform = None
def predict(self, data: Any, batch_size=None, batch_max_tokens=None, output_format='conllx', **kwargs):
if not data:
return []
use_pos = self.use_pos
flat = self.input_is_flat(data, use_pos)
if flat:
data = [data]
samples = self.build_samples(data, use_pos)
if not batch_max_tokens:
batch_max_tokens = self.config.batch_max_tokens
if not batch_size:
batch_size = self.config.batch_size
dataloader = self.build_dataloader(samples,
device=self.devices[0], shuffle=False,
**merge_dict(self.config,
batch_size=batch_size,
batch_max_tokens=batch_max_tokens,
overwrite=True,
**kwargs))
predictions, build_data, data, order = self.before_outputs(data)
for batch in dataloader:
arc_scores, rel_scores, mask, puncts = self.feed_batch(batch)
self.collect_outputs(arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,
build_data)
outputs = self.post_outputs(predictions, data, order, use_pos, build_data)
if flat:
return outputs[0]
return outputs
def build_samples(self, data, use_pos=None):
samples = []
for idx, each in enumerate(data):
sample = {IDX: idx}
if use_pos:
token, pos = zip(*each)
sample.update({'FORM': list(token), 'CPOS': list(pos)})
else:
token = each
sample.update({'FORM': list(token)})
samples.append(sample)
return samples
def input_is_flat(self, data, use_pos=None):
if use_pos:
flat = isinstance(data[0], (list, tuple)) and isinstance(data[0][0], str)
else:
flat = isinstance(data[0], str)
return flat
def before_outputs(self, data):
predictions, order = [], []
build_data = data is None
if build_data:
data = []
return predictions, build_data, data, order
def post_outputs(self, predictions, data, order, use_pos, build_data):
predictions = reorder(predictions, order)
if build_data:
data = reorder(data, order)
outputs = []
self.predictions_to_human(predictions, outputs, data, use_pos)
return outputs
def predictions_to_human(self, predictions, outputs, data, use_pos):
for d, (arcs, rels) in zip(data, predictions):
sent = CoNLLSentence()
for idx, (cell, a, r) in enumerate(zip(d, arcs, rels)):
if use_pos:
token, pos = cell
else:
token, pos = cell, None
sent.append(CoNLLWord(idx + 1, token, cpos=pos, head=a, deprel=self.vocabs['rel'][r]))
outputs.append(sent)
def collect_outputs(self, arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,
build_data):
lens = [len(token) - 1 for token in batch['token']]
arc_preds, rel_preds = self.decode(arc_scores, rel_scores, mask, batch)
self.collect_outputs_extend(predictions, arc_preds, rel_preds, lens, mask)
order.extend(batch[IDX])
if build_data:
if use_pos:
data.extend(zip(batch['FORM'], batch['CPOS']))
else:
data.extend(batch['FORM'])
def collect_outputs_extend(self, predictions: list, arc_preds, rel_preds, lens, mask):
predictions.extend(zip([seq.tolist() for seq in arc_preds[mask].split(lens)],
[seq.tolist() for seq in rel_preds[mask].split(lens)]))
def fit(self,
trn_data,
dev_data,
save_dir,
embed,
n_mlp_arc=500,
n_mlp_rel=100,
n_mlp_sib=100,
mlp_dropout=.33,
lr=2e-3,
transformer_lr=5e-5,
mu=.9,
nu=.9,
epsilon=1e-12,
grad_norm=5.0,
decay=.75,
decay_steps=5000,
weight_decay=0,
warmup_steps=0.1,
separate_optimizer=True,
patience=100,
lowercase=False,
epochs=50000,
tree=False,
proj=True,
mbr=True,
partial=False,
punct=False,
min_freq=2,
logger=None,
verbose=True,
unk=UNK,
max_sequence_length=512,
batch_size=None,
sampler_builder=None,
gradient_accumulation=1,
devices: Union[float, int, List[int]] = None,
transform=None,
eval_trn=False,
bos='\0',
**kwargs):
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def execute_training_loop(self, trn, dev, devices, epochs, logger, patience, save_dir, optimizer,
gradient_accumulation, **kwargs):
optimizer, scheduler, transformer_optimizer, transformer_scheduler = optimizer
criterion = self.build_criterion()
best_e, best_metric = 0, self.build_metric()
timer = CountdownTimer(epochs)
history = History()
ratio_width = len(f'{len(trn) // gradient_accumulation}/{len(trn) // gradient_accumulation}')
for epoch in range(1, epochs + 1):
# train one epoch and update the parameters
logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
self.fit_dataloader(trn, optimizer, scheduler, criterion, epoch, logger, history,
transformer_optimizer, transformer_scheduler,
gradient_accumulation=gradient_accumulation, eval_trn=self.config.eval_trn)
loss, dev_metric = self.evaluate_dataloader(dev, criterion, ratio_width=ratio_width, logger=logger)
timer.update()
# logger.info(f"{'Dev' + ' ' * ratio_width} loss: {loss:.4f} {dev_metric}")
# save the model if it is the best so far
report = f"{timer.elapsed_human} / {timer.total_time_human} ETA: {timer.eta_human}"
if dev_metric > best_metric:
best_e, best_metric = epoch, dev_metric
self.save_weights(save_dir)
report += ' ([red]saved[/red])'
else:
if patience != epochs:
report += f' ({epoch - best_e}/{patience})'
else:
report += f' ({epoch - best_e})'
logger.info(report)
if patience is not None and epoch - best_e >= patience:
                logger.info(f'LAS has stopped improving for {patience} epochs; stopping early.')
break
timer.stop()
if not best_e:
self.save_weights(save_dir)
elif best_e != epoch:
self.load_weights(save_dir)
logger.info(f"Max score of dev is {best_metric.score:.2%} at epoch {best_e}")
logger.info(f"Average time of each epoch is {timer.elapsed_average_human}")
logger.info(f"{timer.elapsed_human} elapsed")
def build_optimizer(self, epochs, trn, gradient_accumulation, **kwargs):
config = self.config
model = self.model
if isinstance(model, nn.DataParallel):
model = model.module
transformer = self._get_transformer_builder()
if transformer and transformer.trainable:
transformer = self._get_transformer()
optimizer = Adam(set(model.parameters()) - set(transformer.parameters()),
config.lr,
(config.mu, config.nu),
config.epsilon)
if self.config.transformer_lr:
num_training_steps = len(trn) * epochs // gradient_accumulation
if not self.config.separate_optimizer:
optimizer, scheduler = build_optimizer_scheduler_with_transformer(model,
transformer,
config.lr,
config.transformer_lr,
num_training_steps,
config.warmup_steps,
config.weight_decay,
config.epsilon)
transformer_optimizer, transformer_scheduler = None, None
else:
transformer_optimizer, transformer_scheduler = \
build_optimizer_scheduler_with_transformer(transformer,
transformer,
config.lr,
config.transformer_lr,
num_training_steps,
config.warmup_steps,
config.weight_decay,
config.epsilon)
else:
transformer.requires_grad_(False)
transformer_optimizer, transformer_scheduler = None, None
else:
optimizer = Adam(model.parameters(),
config.lr,
(config.mu, config.nu),
config.epsilon)
transformer_optimizer, transformer_scheduler = None, None
if self.config.separate_optimizer:
scheduler = ExponentialLR(optimizer, config.decay ** (1 / config.decay_steps))
# noinspection PyUnboundLocalVariable
return optimizer, scheduler, transformer_optimizer, transformer_scheduler
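    # Aside (illustrative sketch, hypothetical helper): the ExponentialLR gamma
    # above is chosen so that the learning rate shrinks by `decay` once every
    # `decay_steps` optimizer steps, because gamma ** decay_steps == decay.
    @staticmethod
    def _decay_gamma_sketch(decay: float, decay_steps: int) -> float:
        # e.g. _decay_gamma_sketch(0.75, 5000) ** 5000 == 0.75 (up to float error)
        return decay ** (1 / decay_steps)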
# noinspection PyMethodOverriding
def build_dataloader(self,
data,
shuffle,
device,
embed: Embedding,
training=False,
logger=None,
gradient_accumulation=1,
sampler_builder=None,
batch_size=None,
bos='\0',
**kwargs) -> DataLoader:
first_transform = TransformList(functools.partial(append_bos, bos=bos))
embed_transform = embed.transform(vocabs=self.vocabs)
transformer_transform = self._get_transformer_transform_from_transforms(embed_transform)
if embed_transform:
if transformer_transform and isinstance(embed_transform, TransformList):
embed_transform.remove(transformer_transform)
first_transform.append(embed_transform)
dataset = self.build_dataset(data, first_transform=first_transform)
if self.config.get('transform', None):
dataset.append_transform(self.config.transform)
if self.vocabs.mutable:
self.build_vocabs(dataset, logger, self._transformer_trainable())
if transformer_transform and isinstance(embed_transform, TransformList):
embed_transform.append(transformer_transform)
dataset.append_transform(FieldLength('token', 'sent_length'))
if isinstance(data, str):
dataset.purge_cache()
if len(dataset) > 1000 and isinstance(data, str):
timer = CountdownTimer(len(dataset))
self.cache_dataset(dataset, timer, training, logger)
if sampler_builder:
lens = [sample['sent_length'] for sample in dataset]
sampler = sampler_builder.build(lens, shuffle, gradient_accumulation)
else:
sampler = None
loader = PadSequenceDataLoader(dataset=dataset,
batch_sampler=sampler,
batch_size=batch_size,
pad=self.get_pad_dict(),
device=device,
vocabs=self.vocabs)
return loader
def cache_dataset(self, dataset, timer, training=False, logger=None):
        # iterating the dataset runs each sample through the transform pipeline,
        # which caches the preprocessed result
        for each in dataset:
timer.log('Preprocessing and caching samples [blink][yellow]...[/yellow][/blink]')
def get_pad_dict(self):
return {'arc': 0}
def build_dataset(self, data, first_transform=None):
if not first_transform:
first_transform = append_bos
transform = [first_transform, get_sibs]
if self.config.get('lowercase', False):
transform.append(LowerCase('token'))
transform.append(self.vocabs)
if not self.config.punct:
transform.append(PunctuationMask('token', 'punct_mask'))
return CoNLLParsingDataset(data, transform=transform)
def build_tokenizer_transform(self):
return TransformerSequenceTokenizer(self.transformer_tokenizer, 'token', '',
ret_token_span=True, cls_is_bos=True,
max_seq_length=self.config.get('max_sequence_length',
512),
truncate_long_sequences=False)
def build_vocabs(self, dataset, logger=None, transformer=False):
rel_vocab = self.vocabs.get('rel', None)
if rel_vocab is None:
rel_vocab = Vocab(unk_token=None, pad_token=self.config.get('pad_rel', None))
self.vocabs.put(rel=rel_vocab)
timer = CountdownTimer(len(dataset))
if transformer:
token_vocab = None
else:
self.vocabs.token = token_vocab = VocabCounter(unk_token=self.config.get('unk', UNK))
for i, sample in enumerate(dataset):
timer.log('Building vocab [blink][yellow]...[/yellow][/blink]', ratio_percentage=True)
min_freq = self.config.get('min_freq', None)
if min_freq:
token_vocab.trim(min_freq)
        rel_vocab.set_unk_as_safe_unk()  # some relations in the dev set may be OOV
self.vocabs.lock()
self.vocabs.summary(logger=logger)
if token_vocab:
self.config.n_words = len(self.vocabs['token'])
self.config.n_rels = len(self.vocabs['rel'])
if token_vocab:
self.config.pad_index = self.vocabs['token'].pad_idx
self.config.unk_index = self.vocabs['token'].unk_idx
# noinspection PyMethodOverriding
def build_model(self, embed: Embedding, encoder, n_mlp_arc, n_mlp_rel, mlp_dropout, n_mlp_sib, training=True,
**kwargs) -> torch.nn.Module:
model = DependencyModel(
embed=embed.module(vocabs=self.vocabs),
encoder=encoder,
decoder=TreeCRFDecoder(encoder.get_output_dim(), n_mlp_arc, n_mlp_sib, n_mlp_rel, mlp_dropout,
len(self.vocabs['rel']))
)
return model
def build_embeddings(self, training=True):
pretrained_embed = None
if self.config.get('pretrained_embed', None):
pretrained_embed = index_word2vec_with_vocab(self.config.pretrained_embed, self.vocabs['token'],
init='zeros', normalize=True)
transformer = self.config.transformer
if transformer:
transformer = AutoModel_.from_pretrained(transformer, training=training)
return pretrained_embed, transformer
# noinspection PyMethodOverriding
def fit_dataloader(self,
trn,
optimizer,
scheduler,
criterion,
epoch,
logger,
history: History,
transformer_optimizer=None,
transformer_scheduler=None,
gradient_accumulation=1,
eval_trn=False,
**kwargs):
self.model.train()
timer = CountdownTimer(history.num_training_steps(len(trn), gradient_accumulation))
metric = self.build_metric(training=True)
total_loss = 0
for idx, batch in enumerate(trn):
optimizer.zero_grad()
(s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)
arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']
loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)
if gradient_accumulation > 1:
loss /= gradient_accumulation
loss.backward()
total_loss += loss.item()
if eval_trn:
arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)
self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)
if history.step(gradient_accumulation):
self._step(optimizer, scheduler, transformer_optimizer, transformer_scheduler)
report = self._report(total_loss / (timer.current + 1), metric if eval_trn else None)
lr = scheduler.get_last_lr()[0]
report += f' lr: {lr:.4e}'
timer.log(report, ratio_percentage=False, logger=logger)
del loss
def _step(self, optimizer, scheduler, transformer_optimizer, transformer_scheduler):
if self.config.get('grad_norm', None):
nn.utils.clip_grad_norm_(self.model.parameters(),
self.config.grad_norm)
optimizer.step()
scheduler.step()
if self._transformer_transform and self.config.transformer_lr and transformer_optimizer:
transformer_optimizer.step()
transformer_optimizer.zero_grad()
transformer_scheduler.step()
def feed_batch(self, batch):
words, feats, lens, puncts = batch.get('token_id', None), batch.get('pos_id', None), batch['sent_length'], \
batch.get('punct_mask', None)
mask = lengths_to_mask(lens)
logits = self.model(batch, mask)
if self.model.training:
mask = mask.clone()
# ignore the first token of each sentence
mask[:, 0] = 0
return logits, mask, puncts
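    # Aside (illustrative sketch): `lengths_to_mask` above is assumed to turn
    # sentence lengths into a boolean padding mask; a torch stand-in with the
    # same assumed semantics (hypothetical helper, not hanlp's implementation):
    @staticmethod
    def _lengths_to_mask_sketch(lens: torch.Tensor) -> torch.Tensor:
        # mask[i, j] is True exactly when j < lens[i],
        # e.g. lens=[2, 3] -> [[True, True, False], [True, True, True]]
        return torch.arange(int(lens.max()), device=lens.device)[None, :] < lens[:, None]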
def _report(self, loss, metric: AttachmentScore = None):
return f'loss: {loss:.4f} {metric}' if metric else f'loss: {loss:.4f}'
def compute_loss(self, s_arc, s_sib, s_rel, arcs, sibs, rels, mask):
crf: CRF2oDependency = self.model.decoder.crf
return crf.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.config.mbr, self.config.partial)
# noinspection PyUnboundLocalVariable
@torch.no_grad()
def evaluate_dataloader(self, loader: PadSequenceDataLoader, criterion, logger=None, filename=None, output=False,
ratio_width=None,
metric=None,
**kwargs):
self.model.eval()
total_loss = 0
if not metric:
metric = self.build_metric()
timer = CountdownTimer(len(loader))
for batch in loader:
(s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)
arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']
loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)
total_loss += float(loss)
arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)
self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)
report = self._report(total_loss / (timer.current + 1), metric)
if filename:
report = f'{os.path.basename(filename)} ' + report
timer.log(report, ratio_percentage=False, logger=logger, ratio_width=ratio_width)
total_loss /= len(loader)
return total_loss, metric
def update_metric(self, arc_preds, rel_preds, arcs, rels, mask, puncts, metric):
# ignore all punctuation if not specified
if not self.config.punct:
mask &= puncts
metric(arc_preds, rel_preds, arcs, rels, mask)
def decode(self, s_arc, s_sib, s_rel, mask):
crf: CRF2oDependency = self.model.decoder.crf
return crf.decode(s_arc, s_sib, s_rel, mask, self.config.tree and not self.model.training, self.config.mbr,
self.config.proj)
def build_criterion(self, **kwargs):
return None
def build_metric(self, **kwargs):
return AttachmentScore()
def _get_transformer_transform_from_transforms(self, transform: Union[
TransformList, TransformerSequenceTokenizer]) -> TransformerSequenceTokenizer:
def _get():
if isinstance(transform, TransformerSequenceTokenizer):
# noinspection PyTypeChecker
return transform
elif isinstance(transform, TransformList):
# noinspection PyTypeChecker,PyArgumentList
for each in transform:
if isinstance(each, TransformerSequenceTokenizer):
return each
if self._transformer_transform is None:
self._transformer_transform = _get()
return self._transformer_transform
def _get_transformer(self):
embed = self.model.embed
if isinstance(embed, ContextualWordEmbeddingModule):
return embed
if isinstance(embed, ConcatModuleList):
for each in embed:
if isinstance(each, ContextualWordEmbeddingModule):
return each
def _get_transformer_builder(self):
embed: Embedding = self.config.embed
if isinstance(embed, ContextualWordEmbedding):
return embed
if isinstance(embed, EmbeddingList):
for each in embed.to_list():
                if isinstance(each, ContextualWordEmbedding):
return each
def _transformer_trainable(self):
builder = self._get_transformer_builder()
if not builder:
return False
return builder.trainable
|
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import aiosqlite
from chia.consensus.block_record import BlockRecord
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.coin_spend import CoinSpend
from chia.types.header_block import HeaderBlock
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32, uint64
from chia.util.lru_cache import LRUCache
from chia.util.streamable import Streamable, streamable
from chia.wallet.block_record import HeaderBlockRecord
@dataclass(frozen=True)
@streamable
class AdditionalCoinSpends(Streamable):
coin_spends_list: List[CoinSpend]
class WalletBlockStore:
"""
    This object handles HeaderBlocks and Blocks stored in the DB used by the wallet.
"""
db: aiosqlite.Connection
db_wrapper: DBWrapper
block_cache: LRUCache
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db = db_wrapper.db
await self.db.execute("pragma journal_mode=wal")
await self.db.execute("pragma synchronous=2")
await self.db.execute(
"CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
" timestamp int, block blob)"
)
await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
# Block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS block_records(header_hash "
"text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
"block blob, sub_epoch_summary blob, is_peak tinyint)"
)
await self.db.execute(
"CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
await self.db.commit()
self.block_cache = LRUCache(1000)
return self
async def _clear_database(self):
cursor_2 = await self.db.execute("DELETE FROM header_blocks")
await cursor_2.close()
await self.db.commit()
async def add_block_record(
self,
header_block_record: HeaderBlockRecord,
block_record: BlockRecord,
additional_coin_spends: List[CoinSpend],
):
"""
Adds a block record to the database. This block record is assumed to be connected
to the chain, but it may or may not be in the LCA path.
"""
cached = self.block_cache.get(header_block_record.header_hash)
if cached is not None:
            # Since the DB write can fail, remove the entry from the cache here to
            # avoid inconsistency; entries are only re-added to the cache on reads.
self.block_cache.put(header_block_record.header_hash, None)
if header_block_record.header.foliage_transaction_block is not None:
timestamp = header_block_record.header.foliage_transaction_block.timestamp
else:
timestamp = uint64(0)
cursor = await self.db.execute(
"INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?)",
(
header_block_record.header_hash.hex(),
header_block_record.height,
timestamp,
bytes(header_block_record),
),
)
await cursor.close()
cursor_2 = await self.db.execute(
"INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?, ?, ?, ?,?)",
(
header_block_record.header.header_hash.hex(),
header_block_record.header.prev_header_hash.hex(),
header_block_record.header.height,
header_block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
header_block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
bytes(block_record),
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included),
False,
),
)
await cursor_2.close()
if len(additional_coin_spends) > 0:
blob: bytes = bytes(AdditionalCoinSpends(additional_coin_spends))
cursor_3 = await self.db.execute(
"INSERT OR REPLACE INTO additional_coin_spends VALUES(?, ?)",
(header_block_record.header_hash.hex(), blob),
)
await cursor_3.close()
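    # Aside (illustrative sketch, hypothetical helper): weight and total_iters are
    # stored above as fixed-width (16-byte) big-endian hex strings, so that
    # lexicographic ordering of the text column agrees with numeric ordering:
    @staticmethod
    def _int_to_ordered_hex_sketch(value: int) -> str:
        # e.g. _int_to_ordered_hex_sketch(100) < _int_to_ordered_hex_sketch(2000)
        return value.to_bytes(128 // 8, "big", signed=False).hex()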
async def get_header_block_at(self, heights: List[uint32]) -> List[HeaderBlock]:
if len(heights) == 0:
return []
heights_db = tuple(heights)
formatted_str = f'SELECT block from header_blocks WHERE height in ({'?,' * (len(heights_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, heights_db)
rows = await cursor.fetchall()
await cursor.close()
return [HeaderBlock.from_bytes(row[0]) for row in rows]
async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
"""Gets a block record from the database, if present"""
cached = self.block_cache.get(header_hash)
if cached is not None:
return cached
cursor = await self.db.execute("SELECT block from header_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
hbr: HeaderBlockRecord = HeaderBlockRecord.from_bytes(row[0])
self.block_cache.put(hbr.header_hash, hbr)
return hbr
else:
return None
async def get_additional_coin_spends(self, header_hash: bytes32) -> Optional[List[CoinSpend]]:
cursor = await self.db.execute(
"SELECT spends_list_blob from additional_coin_spends WHERE header_hash=?", (header_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin_spends: AdditionalCoinSpends = AdditionalCoinSpends.from_bytes(row[0])
return coin_spends.coin_spends_list
else:
return None
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
cursor = await self.db.execute(
"SELECT block from block_records WHERE header_hash=?",
(header_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return BlockRecord.from_bytes(row[0])
return None
async def get_block_records(
self,
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all blocks, as well as the header hash of the peak,
if present.
"""
cursor = await self.db.execute("SELECT header_hash, block, is_peak from block_records")
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
peak: Optional[bytes32] = None
for row in rows:
header_hash_bytes, block_record_bytes, is_peak = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
if is_peak:
assert peak is None # Sanity check, only one peak
peak = header_hash
return ret, peak
def rollback_cache_block(self, header_hash: bytes32):
self.block_cache.remove(header_hash)
async def set_peak(self, header_hash: bytes32) -> None:
cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
await cursor_1.close()
cursor_2 = await self.db.execute(
"UPDATE block_records SET is_peak=1 WHERE header_hash=?",
(header_hash.hex(),),
)
await cursor_2.close()
async def get_block_records_close_to_peak(
self, blocks_n: int
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
        Returns a dictionary of the blocks whose height is within blocks_n of the
        peak, as well as the header hash of the peak, if present.
"""
res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
row = await res.fetchone()
await res.close()
if row is None:
return {}, None
header_hash_bytes, peak_height = row
peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak_height - blocks_n}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
return ret, peak
async def get_header_blocks_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, HeaderBlock]:
formatted_str = f"SELECT header_hash, block from header_blocks WHERE height >= {start} and height <= {stop}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, HeaderBlock] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = HeaderBlock.from_bytes(block_record_bytes)
return ret
async def get_block_records_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, BlockRecord]:
"""
        Returns a dictionary of the blocks whose height lies in [start, stop],
        keyed by header hash.
"""
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
return ret
async def get_peak_heights_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
"""
        Returns a height -> header hash dictionary for the chain from the peak down
        to genesis, and a height -> SubEpochSummary dictionary for the same chain.
"""
res = await self.db.execute("SELECT header_hash from block_records WHERE is_peak = 1")
row = await res.fetchone()
await res.close()
if row is None:
return {}, {}
peak: bytes32 = bytes.fromhex(row[0])
cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
rows = await cursor.fetchall()
await cursor.close()
hash_to_prev_hash: Dict[bytes32, bytes32] = {}
hash_to_height: Dict[bytes32, uint32] = {}
hash_to_summary: Dict[bytes32, SubEpochSummary] = {}
for row in rows:
hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
hash_to_height[bytes.fromhex(row[0])] = row[2]
if row[3] is not None:
hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])
height_to_hash: Dict[uint32, bytes32] = {}
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
curr_header_hash = peak
curr_height = hash_to_height[curr_header_hash]
while True:
height_to_hash[curr_height] = curr_header_hash
if curr_header_hash in hash_to_summary:
sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
if curr_height == 0:
break
curr_header_hash = hash_to_prev_hash[curr_header_hash]
curr_height = hash_to_height[curr_header_hash]
return height_to_hash, sub_epoch_summaries
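# Aside (illustrative sketch, hypothetical helper): the while-loop in
# get_peak_heights_dicts above walks prev_hash pointers from the peak down to
# genesis; the same traversal reduced to plain dicts:
def _walk_chain_sketch(peak, prev_of, height_of):
    # e.g. _walk_chain_sketch('c', {'c': 'b', 'b': 'a'}, {'a': 0, 'b': 1, 'c': 2})
    #      == {2: 'c', 1: 'b', 0: 'a'}
    height_to_hash = {}
    curr = peak
    while True:
        height_to_hash[height_of[curr]] = curr
        if height_of[curr] == 0:
            break
        curr = prev_of[curr]
    return height_to_hash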
| from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import aiosqlite
from chia.consensus.block_record import BlockRecord
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.coin_spend import CoinSpend
from chia.types.header_block import HeaderBlock
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32, uint64
from chia.util.lru_cache import LRUCache
from chia.util.streamable import Streamable, streamable
from chia.wallet.block_record import HeaderBlockRecord
@dataclass(frozen=True)
@streamable
class AdditionalCoinSpends(Streamable):
coin_spends_list: List[CoinSpend]
class WalletBlockStore:
"""
    This object handles HeaderBlocks and Blocks stored in the DB used by the wallet.
"""
db: aiosqlite.Connection
db_wrapper: DBWrapper
block_cache: LRUCache
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db = db_wrapper.db
await self.db.execute("pragma journal_mode=wal")
await self.db.execute("pragma synchronous=2")
await self.db.execute(
"CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
" timestamp int, block blob)"
)
await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
# Block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS block_records(header_hash "
"text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
"block blob, sub_epoch_summary blob, is_peak tinyint)"
)
await self.db.execute(
"CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
await self.db.commit()
self.block_cache = LRUCache(1000)
return self
async def _clear_database(self):
cursor_2 = await self.db.execute("DELETE FROM header_blocks")
await cursor_2.close()
await self.db.commit()
async def add_block_record(
self,
header_block_record: HeaderBlockRecord,
block_record: BlockRecord,
additional_coin_spends: List[CoinSpend],
):
"""
Adds a block record to the database. This block record is assumed to be connected
to the chain, but it may or may not be in the LCA path.
"""
cached = self.block_cache.get(header_block_record.header_hash)
if cached is not None:
            # Since the DB write can fail, remove the entry from the cache here to
            # avoid inconsistency; entries are only re-added to the cache on reads.
self.block_cache.put(header_block_record.header_hash, None)
if header_block_record.header.foliage_transaction_block is not None:
timestamp = header_block_record.header.foliage_transaction_block.timestamp
else:
timestamp = uint64(0)
cursor = await self.db.execute(
"INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?)",
(
header_block_record.header_hash.hex(),
header_block_record.height,
timestamp,
bytes(header_block_record),
),
)
await cursor.close()
cursor_2 = await self.db.execute(
"INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?, ?, ?, ?,?)",
(
header_block_record.header.header_hash.hex(),
header_block_record.header.prev_header_hash.hex(),
header_block_record.header.height,
header_block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
header_block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
bytes(block_record),
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included),
False,
),
)
await cursor_2.close()
if len(additional_coin_spends) > 0:
blob: bytes = bytes(AdditionalCoinSpends(additional_coin_spends))
cursor_3 = await self.db.execute(
"INSERT OR REPLACE INTO additional_coin_spends VALUES(?, ?)",
(header_block_record.header_hash.hex(), blob),
)
await cursor_3.close()
async def get_header_block_at(self, heights: List[uint32]) -> List[HeaderBlock]:
if len(heights) == 0:
return []
heights_db = tuple(heights)
formatted_str = f'SELECT block from header_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, heights_db)
rows = await cursor.fetchall()
await cursor.close()
return [HeaderBlock.from_bytes(row[0]) for row in rows]
async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
"""Gets a block record from the database, if present"""
cached = self.block_cache.get(header_hash)
if cached is not None:
return cached
cursor = await self.db.execute("SELECT block from header_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
hbr: HeaderBlockRecord = HeaderBlockRecord.from_bytes(row[0])
self.block_cache.put(hbr.header_hash, hbr)
return hbr
else:
return None
async def get_additional_coin_spends(self, header_hash: bytes32) -> Optional[List[CoinSpend]]:
cursor = await self.db.execute(
"SELECT spends_list_blob from additional_coin_spends WHERE header_hash=?", (header_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin_spends: AdditionalCoinSpends = AdditionalCoinSpends.from_bytes(row[0])
return coin_spends.coin_spends_list
else:
return None
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
cursor = await self.db.execute(
"SELECT block from block_records WHERE header_hash=?",
(header_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return BlockRecord.from_bytes(row[0])
return None
async def get_block_records(
self,
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all blocks, as well as the header hash of the peak,
if present.
"""
cursor = await self.db.execute("SELECT header_hash, block, is_peak from block_records")
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
peak: Optional[bytes32] = None
for row in rows:
header_hash_bytes, block_record_bytes, is_peak = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
if is_peak:
assert peak is None # Sanity check, only one peak
peak = header_hash
return ret, peak
def rollback_cache_block(self, header_hash: bytes32):
self.block_cache.remove(header_hash)
async def set_peak(self, header_hash: bytes32) -> None:
cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
await cursor_1.close()
cursor_2 = await self.db.execute(
"UPDATE block_records SET is_peak=1 WHERE header_hash=?",
(header_hash.hex(),),
)
await cursor_2.close()
async def get_block_records_close_to_peak(
self, blocks_n: int
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
        Returns a dictionary of the blocks whose height is within blocks_n of the
        peak, as well as the header hash of the peak, if present.
"""
res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
row = await res.fetchone()
await res.close()
if row is None:
return {}, None
header_hash_bytes, peak_height = row
peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak_height - blocks_n}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
return ret, peak
async def get_header_blocks_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, HeaderBlock]:
formatted_str = f"SELECT header_hash, block from header_blocks WHERE height >= {start} and height <= {stop}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, HeaderBlock] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = HeaderBlock.from_bytes(block_record_bytes)
return ret
async def get_block_records_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, BlockRecord]:
"""
        Returns a dictionary of the blocks whose height lies in [start, stop],
        keyed by header hash.
"""
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
return ret
async def get_peak_heights_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
"""
        Returns a height -> header hash dictionary for the chain from the peak down
        to genesis, and a height -> SubEpochSummary dictionary for the same chain.
"""
res = await self.db.execute("SELECT header_hash from block_records WHERE is_peak = 1")
row = await res.fetchone()
await res.close()
if row is None:
return {}, {}
peak: bytes32 = bytes.fromhex(row[0])
cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
rows = await cursor.fetchall()
await cursor.close()
hash_to_prev_hash: Dict[bytes32, bytes32] = {}
hash_to_height: Dict[bytes32, uint32] = {}
hash_to_summary: Dict[bytes32, SubEpochSummary] = {}
for row in rows:
hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
hash_to_height[bytes.fromhex(row[0])] = row[2]
if row[3] is not None:
hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])
height_to_hash: Dict[uint32, bytes32] = {}
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
curr_header_hash = peak
curr_height = hash_to_height[curr_header_hash]
while True:
height_to_hash[curr_height] = curr_header_hash
if curr_header_hash in hash_to_summary:
sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
if curr_height == 0:
break
curr_header_hash = hash_to_prev_hash[curr_header_hash]
curr_height = hash_to_height[curr_header_hash]
return height_to_hash, sub_epoch_summaries
|
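# Aside (illustrative sketch): get_header_block_at above builds its IN clause by
# repeating a "?" placeholder per value ("?," * (n - 1) + "?") while keeping the
# values themselves parameterized. The same pattern against stdlib sqlite3:
import sqlite3

def _select_in_sketch() -> None:
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE header_blocks(height int, block blob)")
    conn.executemany("INSERT INTO header_blocks VALUES(?, ?)",
                     [(h, bytes([h])) for h in range(5)])
    heights = (1, 3, 4)
    query = f"SELECT block FROM header_blocks WHERE height IN ({'?,' * (len(heights) - 1)}?) ORDER BY height"
    rows = conn.execute(query, heights).fetchall()
    assert [row[0] for row in rows] == [b'\x01', b'\x03', b'\x04']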
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Views used by the SqlAlchemy connector"""
import logging
import re
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Union
from flask import current_app, flash, Markup, redirect
from flask_appbuilder import CompactCRUDMixin, expose
from flask_appbuilder.actions import action
from flask_appbuilder.fieldwidgets import Select2Widget
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import gettext as __, lazy_gettext as _
from werkzeug.exceptions import NotFound
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Regexp
from superset import app, db, is_feature_enabled
from superset.connectors.base.views import DatasourceModelView
from superset.connectors.sqla import models
from superset.constants import MODEL_VIEW_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import (
check_ownership,
create_table_permissions,
DatasourceFilter,
DeleteMixin,
ListWidgetWithCheckboxes,
SupersetListWidget,
SupersetModelView,
validate_sqlatable,
YamlExportMixin,
)
logger = logging.getLogger(__name__)
class TableColumnInlineView( # pylint: disable=too-many-ancestors
CompactCRUDMixin, SupersetModelView
):
datamodel = SQLAInterface(models.TableColumn)
    # TODO: review need for this on related_views
class_permission_name = "Dataset"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET
list_title = _("Columns")
show_title = _("Show Column")
add_title = _("Add Column")
edit_title = _("Edit Column")
can_delete = False
list_widget = ListWidgetWithCheckboxes
edit_columns = [
"column_name",
"verbose_name",
"description",
"type",
"groupby",
"filterable",
"table",
"expression",
"is_dttm",
"python_date_format",
]
add_columns = edit_columns
list_columns = [
"column_name",
"verbose_name",
"type",
"groupby",
"filterable",
"is_dttm",
]
page_size = 500
description_columns = {
"is_dttm": _(
"Whether to make this column available as a "
"[Time Granularity] option, column has to be DATETIME or "
"DATETIME-like"
),
"filterable": _(
"Whether this column is exposed in the `Filters` section "
"of the explore view."
),
"type": _(
"The data type that was inferred by the database. "
"It may be necessary to input a type manually for "
"expression-defined columns in some cases. In most case "
"users should not need to alter this."
),
"expression": utils.markdown(
"a valid, *non-aggregating* SQL expression as supported by the "
"underlying backend. Example: `substr(name, 1, 1)`",
True,
),
"python_date_format": utils.markdown(
Markup(
"The pattern of timestamp format. For strings use "
'<a href="https://docs.python.org/2/library/'
'datetime.html#strftime-strptime-behavior">'
"python datetime string pattern</a> expression which needs to "
'adhere to the <a href="https://en.wikipedia.org/wiki/ISO_8601">'
"ISO 8601</a> standard to ensure that the lexicographical ordering "
"coincides with the chronological ordering. If the timestamp "
"format does not adhere to the ISO 8601 standard you will need to "
"define an expression and type for transforming the string into a "
"date or timestamp. Note currently time zones are not supported. "
"If time is stored in epoch format, put `epoch_s` or `epoch_ms`."
"If no pattern is specified we fall back to using the optional "
"defaults on a per database/column name level via the extra parameter."
""
),
True,
),
}
label_columns = {
"column_name": _("Column"),
"verbose_name": _("Verbose Name"),
"description": _("Description"),
"groupby": _("Groupable"),
"filterable": _("Filterable"),
"table": _("Table"),
"expression": _("Expression"),
"is_dttm": _("Is temporal"),
"python_date_format": _("Datetime Format"),
"type": _("Type"),
}
validators_columns = {
"python_date_format": [
# Restrict viable values to epoch_s, epoch_ms, or a strftime format
            # which adheres to the ISO 8601 format (without time zone); see the
            # sketch after this mapping.
Regexp(
re.compile(
r"""
^(
epoch_s|epoch_ms|
(?P<date>%Y(-%m(-%d)?)?)([\sT](?P<time>%H(:%M(:%S(\.%f)?)?)?))?
)$
""",
re.VERBOSE,
),
message=_("Invalid date/timestamp format"),
)
]
}
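    # Aside (illustrative sketch, hypothetical helper) exercising the same regex
    # as the validator above: it accepts `epoch_s`/`epoch_ms` or an
    # ISO-8601-ordered strftime pattern, and rejects day-first formats because
    # the date branch must start with %Y.
    @staticmethod
    def _date_format_examples_sketch() -> None:
        pattern = re.compile(
            r"""
            ^(
                epoch_s|epoch_ms|
                (?P<date>%Y(-%m(-%d)?)?)([\sT](?P<time>%H(:%M(:%S(\.%f)?)?)?))?
            )$
            """,
            re.VERBOSE,
        )
        assert pattern.match("epoch_s")
        assert pattern.match("%Y-%m-%d %H:%M:%S")
        assert not pattern.match("%d/%m/%Y")  # day-first ordering is rejected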
add_form_extra_fields = {
"table": QuerySelectField(
"Table",
query_factory=lambda: db.session.query(models.SqlaTable),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
def pre_add(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
    def pre_update(self, item: "models.TableColumn") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
    def pre_delete(self, item: "models.TableColumn") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
class SqlMetricInlineView( # pylint: disable=too-many-ancestors
CompactCRUDMixin, SupersetModelView
):
datamodel = SQLAInterface(models.SqlMetric)
class_permission_name = "Dataset"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET
list_title = _("Metrics")
show_title = _("Show Metric")
add_title = _("Add Metric")
edit_title = _("Edit Metric")
list_columns = ["metric_name", "verbose_name", "metric_type"]
edit_columns = [
"metric_name",
"description",
"verbose_name",
"metric_type",
"expression",
"table",
"d3format",
"extra",
"warning_text",
]
description_columns = {
"expression": utils.markdown(
"a valid, *aggregating* SQL expression as supported by the "
"underlying backend. Example: `count(DISTINCT userid)`",
True,
),
"d3format": utils.markdown(
"d3 formatting string as defined [here]"
"(https://github.com/d3/d3-format/blob/master/README.md#format). "
"For instance, this default formatting applies in the Table "
"visualization and allow for different metric to use different "
"formats",
True,
),
"extra": utils.markdown(
"Extra data to specify metric metadata. Currently supports "
'metadata of the format: `{ "certification": { "certified_by": '
'"Data Platform Team", "details": "This metric is the source of truth." '
'}, "warning_markdown": "This is a warning." }`. This should be modified '
"from the edit datasource model in Explore to ensure correct formatting.",
True,
),
}
add_columns = edit_columns
page_size = 500
label_columns = {
"metric_name": _("Metric"),
"description": _("Description"),
"verbose_name": _("Verbose Name"),
"metric_type": _("Type"),
"expression": _("SQL Expression"),
"table": _("Table"),
"d3format": _("D3 Format"),
"extra": _("Extra"),
"warning_text": _("Warning Message"),
}
add_form_extra_fields = {
"table": QuerySelectField(
"Table",
query_factory=lambda: db.session.query(models.SqlaTable),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
def pre_add(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
def pre_update(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
def pre_delete(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
class RowLevelSecurityListWidget(
SupersetListWidget
): # pylint: disable=too-few-public-methods
template = "superset/models/rls/list.html"
def __init__(self, **kwargs: Any):
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
class RowLevelSecurityFiltersModelView( # pylint: disable=too-many-ancestors
SupersetModelView, DeleteMixin
):
datamodel = SQLAInterface(models.RowLevelSecurityFilter)
list_widget = cast(SupersetListWidget, RowLevelSecurityListWidget)
list_title = _("Row level security filter")
show_title = _("Show Row level security filter")
add_title = _("Add Row level security filter")
edit_title = _("Edit Row level security filter")
list_columns = [
"filter_type",
"tables",
"roles",
"group_key",
"clause",
"creator",
"modified",
]
order_columns = ["filter_type", "group_key", "clause", "modified"]
edit_columns = ["filter_type", "tables", "roles", "group_key", "clause"]
show_columns = edit_columns
search_columns = ("filter_type", "tables", "roles", "group_key", "clause")
add_columns = edit_columns
base_order = ("changed_on", "desc")
description_columns = {
"filter_type": _(
"Regular filters add where clauses to queries if a user belongs to a "
"role referenced in the filter. Base filters apply filters to all queries "
"except the roles defined in the filter, and can be used to define what "
"users can see if no RLS filters within a filter group apply to them."
),
"tables": _("These are the tables this filter will be applied to."),
"roles": _(
"For regular filters, these are the roles this filter will be "
"applied to. For base filters, these are the roles that the "
"filter DOES NOT apply to, e.g. Admin if admin should see all "
"data."
),
"group_key": _(
"Filters with the same group key will be ORed together within the group, "
"while different filter groups will be ANDed together. Undefined group "
"keys are treated as unique groups, i.e. are not grouped together. "
"For example, if a table has three filters, of which two are for "
"departments Finance and Marketing (group key = 'department'), and one "
"refers to the region Europe (group key = 'region'), the filter clause "
"would apply the filter (department = 'Finance' OR department = "
"'Marketing') AND (region = 'Europe')."
),
"clause": _(
"This is the condition that will be added to the WHERE clause. "
"For example, to only return rows for a particular client, "
"you might define a regular filter with the clause `client_id = 9`. To "
"display no rows unless a user belongs to a RLS filter role, a base "
"filter can be created with the clause `1 = 0` (always false)."
),
}
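    # Aside (illustrative sketch, hypothetical helper): the group_key semantics
    # described above -- OR within a group, AND across groups, undefined keys
    # treated as unique groups -- reduced to plain clause assembly:
    @staticmethod
    def _combine_rls_sketch(filters: List[Any]) -> str:
        # filters: list of (group_key, clause) pairs, e.g.
        #   [('department', "department = 'Finance'"),
        #    ('department', "department = 'Marketing'"),
        #    ('region', "region = 'Europe'")]
        # -> "(department = 'Finance' OR department = 'Marketing') AND (region = 'Europe')"
        groups: Dict[str, List[str]] = {}
        for i, (key, clause) in enumerate(filters):
            groups.setdefault(key if key is not None else f"__unique_{i}", []).append(clause)
        return " AND ".join("(" + " OR ".join(cs) + ")" for cs in groups.values())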
label_columns = {
"tables": _("Tables"),
"roles": _("Roles"),
"clause": _("Clause"),
"creator": _("Creator"),
"modified": _("Modified"),
}
if app.config["RLS_FORM_QUERY_REL_FIELDS"]:
add_form_query_rel_fields = app.config["RLS_FORM_QUERY_REL_FIELDS"]
edit_form_query_rel_fields = add_form_query_rel_fields
@staticmethod
def is_enabled() -> bool:
return is_feature_enabled("ROW_LEVEL_SECURITY")
@before_request
def ensure_enabled(self) -> None:
if not self.is_enabled():
raise NotFound()
class TableModelView( # pylint: disable=too-many-ancestors
DatasourceModelView, DeleteMixin, YamlExportMixin
):
datamodel = SQLAInterface(models.SqlaTable)
class_permission_name = "Dataset"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.CRUD_SET
list_title = _("Tables")
show_title = _("Show Table")
add_title = _("Import a table definition")
edit_title = _("Edit Table")
list_columns = ["link", "database_name", "changed_by_", "modified"]
order_columns = ["modified"]
add_columns = ["database", "schema", "table_name"]
edit_columns = [
"table_name",
"sql",
"filter_select_enabled",
"fetch_values_predicate",
"database",
"schema",
"description",
"owners",
"main_dttm_col",
"default_endpoint",
"offset",
"cache_timeout",
"is_sqllab_view",
"template_params",
"extra",
]
base_filters = [["id", DatasourceFilter, lambda: []]]
show_columns = edit_columns + ["perm", "slices"]
related_views = [
TableColumnInlineView,
SqlMetricInlineView,
]
base_order = ("changed_on", "desc")
search_columns = ("database", "schema", "table_name", "owners", "is_sqllab_view")
description_columns = {
"slices": _(
"The list of charts associated with this table. By "
"altering this datasource, you may change how these associated "
"charts behave. "
"Also note that charts need to point to a datasource, so "
"this form will fail at saving if removing charts from a "
"datasource. If you want to change the datasource for a chart, "
"overwrite the chart from the 'explore view'"
),
"offset": _("Timezone offset (in hours) for this datasource"),
"table_name": _("Name of the table that exists in the source database"),
"schema": _(
"Schema, as used only in some databases like Postgres, Redshift " "and DB2"
),
"description": Markup(
'Supports <a href="https://daringfireball.net/projects/markdown/">'
"markdown</a>"
),
"sql": _(
"This fields acts a Superset view, meaning that Superset will "
"run a query against this string as a subquery."
),
"fetch_values_predicate": _(
"Predicate applied when fetching distinct value to "
"populate the filter control component. Supports "
"jinja template syntax. Applies only when "
"`Enable Filter Select` is on."
),
"default_endpoint": _(
"Redirects to this endpoint when clicking on the table "
"from the table list"
),
"filter_select_enabled": _(
"Whether to populate the filter's dropdown in the explore "
"view's filter section with a list of distinct values fetched "
"from the backend on the fly"
),
"is_sqllab_view": _(
"Whether the table was generated by the 'Visualize' flow " "in SQL Lab"
),
"template_params": _(
"A set of parameters that become available in the query using "
"Jinja templating syntax"
),
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this table. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the database timeout if undefined."
),
"extra": utils.markdown(
"Extra data to specify table metadata. Currently supports "
'metadata of the format: `{ "certification": { "certified_by": '
'"Data Platform Team", "details": "This table is the source of truth." '
'}, "warning_markdown": "This is a warning." }`.',
True,
),
}
label_columns = {
"slices": _("Associated Charts"),
"link": _("Table"),
"changed_by_": _("Changed By"),
"database": _("Database"),
"database_name": _("Database"),
"changed_on_": _("Last Changed"),
"filter_select_enabled": _("Enable Filter Select"),
"schema": _("Schema"),
"default_endpoint": _("Default Endpoint"),
"offset": _("Offset"),
"cache_timeout": _("Cache Timeout"),
"table_name": _("Table Name"),
"fetch_values_predicate": _("Fetch Values Predicate"),
"owners": _("Owners"),
"main_dttm_col": _("Main Datetime Column"),
"description": _("Description"),
"is_sqllab_view": _("SQL Lab View"),
"template_params": _("Template parameters"),
"extra": _("Extra"),
"modified": _("Modified"),
}
edit_form_extra_fields = {
"database": QuerySelectField(
"Database",
query_factory=lambda: db.session.query(models.Database),
widget=Select2Widget(extra_classes="readonly"),
)
}
def pre_add(self, item: "TableModelView") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
validate_sqlatable(item)
def pre_update(self, item: "TableModelView") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item)
def post_add( # pylint: disable=arguments-differ
self,
item: "TableModelView",
flash_message: bool = True,
fetch_metadata: bool = True,
) -> None:
if fetch_metadata:
item.fetch_metadata()
create_table_permissions(item)
if flash_message:
flash(
_(
"The table was created. "
"As part of this two-phase configuration "
"process, you should now click the edit button by "
"the new table to configure it."
),
"info",
)
def post_update(self, item: "TableModelView") -> None:
self.post_add(item, flash_message=False, fetch_metadata=False)
def _delete(self, pk: int) -> None:
DeleteMixin._delete(self, pk)
@expose("/edit/<pk>", methods=["GET", "POST"])
@has_access
def edit(self, pk: str) -> FlaskResponse:
"""Simple hack to redirect to explore view after saving"""
resp = super().edit(pk)
if isinstance(resp, str):
return resp
return redirect("/superset/explore/table/{}/".format(pk))
@action(
"refresh", __("Refresh Metadata"), __("Refresh column metadata"), "fa-refresh"
)
def refresh( # pylint: disable=no-self-use, too-many-branches
self, tables: Union["TableModelView", List["TableModelView"]]
) -> FlaskResponse:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if not isinstance(tables, list):
tables = [tables]
@dataclass
class RefreshResults:
successes: List[TableModelView] = field(default_factory=list)
failures: List[TableModelView] = field(default_factory=list)
added: Dict[str, List[str]] = field(default_factory=dict)
removed: Dict[str, List[str]] = field(default_factory=dict)
modified: Dict[str, List[str]] = field(default_factory=dict)
results = RefreshResults()
for table_ in tables:
try:
metadata_results = table_.fetch_metadata()
if metadata_results.added:
results.added[table_.table_name] = metadata_results.added
if metadata_results.removed:
results.removed[table_.table_name] = metadata_results.removed
if metadata_results.modified:
results.modified[table_.table_name] = metadata_results.modified
results.successes.append(table_)
except Exception: # pylint: disable=broad-except
results.failures.append(table_)
if len(results.successes) > 0:
success_msg = _(
"Metadata refreshed for the following table(s): %(tables)s",
tables=", ".join([t.table_name for t in results.successes]),
)
flash(success_msg, "info")
if results.added:
added_tables = []
for table, cols in results.added.items():
added_tables.append(f"{table} ({", ".join(cols)})")
flash(
_(
"The following tables added new columns: %(tables)s",
tables=", ".join(added_tables),
),
"info",
)
if results.removed:
removed_tables = []
for table, cols in results.removed.items():
removed_tables.append(f"{table} ({", ".join(cols)})")
flash(
_(
"The following tables removed columns: %(tables)s",
tables=", ".join(removed_tables),
),
"info",
)
if results.modified:
modified_tables = []
for table, cols in results.modified.items():
modified_tables.append(f"{table} ({", ".join(cols)})")
flash(
_(
"The following tables update column metadata: %(tables)s",
tables=", ".join(modified_tables),
),
"info",
)
if len(results.failures) > 0:
failure_msg = _(
"Unable to refresh metadata for the following table(s): %(tables)s",
tables=", ".join([t.table_name for t in results.failures]),
)
flash(failure_msg, "danger")
return redirect("/tablemodelview/list/")
@expose("/list/")
@has_access
def list(self) -> FlaskResponse:
if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"):
return super().list()
return super().render_app_template()
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Views used by the SqlAlchemy connector"""
import logging
import re
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Union
from flask import current_app, flash, Markup, redirect
from flask_appbuilder import CompactCRUDMixin, expose
from flask_appbuilder.actions import action
from flask_appbuilder.fieldwidgets import Select2Widget
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import gettext as __, lazy_gettext as _
from werkzeug.exceptions import NotFound
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Regexp
from superset import app, db, is_feature_enabled
from superset.connectors.base.views import DatasourceModelView
from superset.connectors.sqla import models
from superset.constants import MODEL_VIEW_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import (
check_ownership,
create_table_permissions,
DatasourceFilter,
DeleteMixin,
ListWidgetWithCheckboxes,
SupersetListWidget,
SupersetModelView,
validate_sqlatable,
YamlExportMixin,
)
logger = logging.getLogger(__name__)
class TableColumnInlineView( # pylint: disable=too-many-ancestors
CompactCRUDMixin, SupersetModelView
):
datamodel = SQLAInterface(models.TableColumn)
    # TODO: review need for this on related_views
class_permission_name = "Dataset"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET
list_title = _("Columns")
show_title = _("Show Column")
add_title = _("Add Column")
edit_title = _("Edit Column")
can_delete = False
list_widget = ListWidgetWithCheckboxes
edit_columns = [
"column_name",
"verbose_name",
"description",
"type",
"groupby",
"filterable",
"table",
"expression",
"is_dttm",
"python_date_format",
]
add_columns = edit_columns
list_columns = [
"column_name",
"verbose_name",
"type",
"groupby",
"filterable",
"is_dttm",
]
page_size = 500
description_columns = {
"is_dttm": _(
"Whether to make this column available as a "
"[Time Granularity] option, column has to be DATETIME or "
"DATETIME-like"
),
"filterable": _(
"Whether this column is exposed in the `Filters` section "
"of the explore view."
),
"type": _(
"The data type that was inferred by the database. "
"It may be necessary to input a type manually for "
"expression-defined columns in some cases. In most case "
"users should not need to alter this."
),
"expression": utils.markdown(
"a valid, *non-aggregating* SQL expression as supported by the "
"underlying backend. Example: `substr(name, 1, 1)`",
True,
),
"python_date_format": utils.markdown(
Markup(
"The pattern of timestamp format. For strings use "
'<a href="https://docs.python.org/2/library/'
'datetime.html#strftime-strptime-behavior">'
"python datetime string pattern</a> expression which needs to "
'adhere to the <a href="https://en.wikipedia.org/wiki/ISO_8601">'
"ISO 8601</a> standard to ensure that the lexicographical ordering "
"coincides with the chronological ordering. If the timestamp "
"format does not adhere to the ISO 8601 standard you will need to "
"define an expression and type for transforming the string into a "
"date or timestamp. Note currently time zones are not supported. "
"If time is stored in epoch format, put `epoch_s` or `epoch_ms`."
"If no pattern is specified we fall back to using the optional "
"defaults on a per database/column name level via the extra parameter."
""
),
True,
),
}
label_columns = {
"column_name": _("Column"),
"verbose_name": _("Verbose Name"),
"description": _("Description"),
"groupby": _("Groupable"),
"filterable": _("Filterable"),
"table": _("Table"),
"expression": _("Expression"),
"is_dttm": _("Is temporal"),
"python_date_format": _("Datetime Format"),
"type": _("Type"),
}
validators_columns = {
"python_date_format": [
# Restrict viable values to epoch_s, epoch_ms, or a strftime format
            # which adheres to the ISO 8601 format (without time zone).
Regexp(
re.compile(
r"""
^(
epoch_s|epoch_ms|
(?P<date>%Y(-%m(-%d)?)?)([\sT](?P<time>%H(:%M(:%S(\.%f)?)?)?))?
)$
""",
re.VERBOSE,
),
message=_("Invalid date/timestamp format"),
)
]
}
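    # The table selector below is rendered with Select2's "readonly" class,
    # so the parent table is displayed but cannot be reassigned from this
    # inline view (assumed intent of extra_classes="readonly").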
add_form_extra_fields = {
"table": QuerySelectField(
"Table",
query_factory=lambda: db.session.query(models.SqlaTable),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
def pre_add(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
def pre_update(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
def pre_delete(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
class SqlMetricInlineView( # pylint: disable=too-many-ancestors
CompactCRUDMixin, SupersetModelView
):
datamodel = SQLAInterface(models.SqlMetric)
class_permission_name = "Dataset"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET
list_title = _("Metrics")
show_title = _("Show Metric")
add_title = _("Add Metric")
edit_title = _("Edit Metric")
list_columns = ["metric_name", "verbose_name", "metric_type"]
edit_columns = [
"metric_name",
"description",
"verbose_name",
"metric_type",
"expression",
"table",
"d3format",
"extra",
"warning_text",
]
description_columns = {
"expression": utils.markdown(
"a valid, *aggregating* SQL expression as supported by the "
"underlying backend. Example: `count(DISTINCT userid)`",
True,
),
"d3format": utils.markdown(
"d3 formatting string as defined [here]"
"(https://github.com/d3/d3-format/blob/master/README.md#format). "
"For instance, this default formatting applies in the Table "
"visualization and allow for different metric to use different "
"formats",
True,
),
"extra": utils.markdown(
"Extra data to specify metric metadata. Currently supports "
'metadata of the format: `{ "certification": { "certified_by": '
'"Data Platform Team", "details": "This metric is the source of truth." '
'}, "warning_markdown": "This is a warning." }`. This should be modified '
"from the edit datasource model in Explore to ensure correct formatting.",
True,
),
}
add_columns = edit_columns
page_size = 500
label_columns = {
"metric_name": _("Metric"),
"description": _("Description"),
"verbose_name": _("Verbose Name"),
"metric_type": _("Type"),
"expression": _("SQL Expression"),
"table": _("Table"),
"d3format": _("D3 Format"),
"extra": _("Extra"),
"warning_text": _("Warning Message"),
}
add_form_extra_fields = {
"table": QuerySelectField(
"Table",
query_factory=lambda: db.session.query(models.SqlaTable),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
def pre_add(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
def pre_update(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
def pre_delete(self, item: "models.SqlMetric") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item.table)
class RowLevelSecurityListWidget(
SupersetListWidget
): # pylint: disable=too-few-public-methods
template = "superset/models/rls/list.html"
def __init__(self, **kwargs: Any):
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
class RowLevelSecurityFiltersModelView( # pylint: disable=too-many-ancestors
SupersetModelView, DeleteMixin
):
datamodel = SQLAInterface(models.RowLevelSecurityFilter)
list_widget = cast(SupersetListWidget, RowLevelSecurityListWidget)
list_title = _("Row level security filter")
show_title = _("Show Row level security filter")
add_title = _("Add Row level security filter")
edit_title = _("Edit Row level security filter")
list_columns = [
"filter_type",
"tables",
"roles",
"group_key",
"clause",
"creator",
"modified",
]
order_columns = ["filter_type", "group_key", "clause", "modified"]
edit_columns = ["filter_type", "tables", "roles", "group_key", "clause"]
show_columns = edit_columns
search_columns = ("filter_type", "tables", "roles", "group_key", "clause")
add_columns = edit_columns
base_order = ("changed_on", "desc")
description_columns = {
"filter_type": _(
"Regular filters add where clauses to queries if a user belongs to a "
"role referenced in the filter. Base filters apply filters to all queries "
"except the roles defined in the filter, and can be used to define what "
"users can see if no RLS filters within a filter group apply to them."
),
"tables": _("These are the tables this filter will be applied to."),
"roles": _(
"For regular filters, these are the roles this filter will be "
"applied to. For base filters, these are the roles that the "
"filter DOES NOT apply to, e.g. Admin if admin should see all "
"data."
),
"group_key": _(
"Filters with the same group key will be ORed together within the group, "
"while different filter groups will be ANDed together. Undefined group "
"keys are treated as unique groups, i.e. are not grouped together. "
"For example, if a table has three filters, of which two are for "
"departments Finance and Marketing (group key = 'department'), and one "
"refers to the region Europe (group key = 'region'), the filter clause "
"would apply the filter (department = 'Finance' OR department = "
"'Marketing') AND (region = 'Europe')."
),
"clause": _(
"This is the condition that will be added to the WHERE clause. "
"For example, to only return rows for a particular client, "
"you might define a regular filter with the clause `client_id = 9`. To "
"display no rows unless a user belongs to a RLS filter role, a base "
"filter can be created with the clause `1 = 0` (always false)."
),
}
label_columns = {
"tables": _("Tables"),
"roles": _("Roles"),
"clause": _("Clause"),
"creator": _("Creator"),
"modified": _("Modified"),
}
if app.config["RLS_FORM_QUERY_REL_FIELDS"]:
add_form_query_rel_fields = app.config["RLS_FORM_QUERY_REL_FIELDS"]
edit_form_query_rel_fields = add_form_query_rel_fields
@staticmethod
def is_enabled() -> bool:
return is_feature_enabled("ROW_LEVEL_SECURITY")
@before_request
def ensure_enabled(self) -> None:
if not self.is_enabled():
raise NotFound()
class TableModelView( # pylint: disable=too-many-ancestors
DatasourceModelView, DeleteMixin, YamlExportMixin
):
datamodel = SQLAInterface(models.SqlaTable)
class_permission_name = "Dataset"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.CRUD_SET
list_title = _("Tables")
show_title = _("Show Table")
add_title = _("Import a table definition")
edit_title = _("Edit Table")
list_columns = ["link", "database_name", "changed_by_", "modified"]
order_columns = ["modified"]
add_columns = ["database", "schema", "table_name"]
edit_columns = [
"table_name",
"sql",
"filter_select_enabled",
"fetch_values_predicate",
"database",
"schema",
"description",
"owners",
"main_dttm_col",
"default_endpoint",
"offset",
"cache_timeout",
"is_sqllab_view",
"template_params",
"extra",
]
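    # Restrict the list view to datasources the current user may access
    # (DatasourceFilter applies the per-user permission filter).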
base_filters = [["id", DatasourceFilter, lambda: []]]
show_columns = edit_columns + ["perm", "slices"]
related_views = [
TableColumnInlineView,
SqlMetricInlineView,
]
base_order = ("changed_on", "desc")
search_columns = ("database", "schema", "table_name", "owners", "is_sqllab_view")
description_columns = {
"slices": _(
"The list of charts associated with this table. By "
"altering this datasource, you may change how these associated "
"charts behave. "
"Also note that charts need to point to a datasource, so "
"this form will fail at saving if removing charts from a "
"datasource. If you want to change the datasource for a chart, "
"overwrite the chart from the 'explore view'"
),
"offset": _("Timezone offset (in hours) for this datasource"),
"table_name": _("Name of the table that exists in the source database"),
"schema": _(
"Schema, as used only in some databases like Postgres, Redshift " "and DB2"
),
"description": Markup(
'Supports <a href="https://daringfireball.net/projects/markdown/">'
"markdown</a>"
),
"sql": _(
"This fields acts a Superset view, meaning that Superset will "
"run a query against this string as a subquery."
),
"fetch_values_predicate": _(
"Predicate applied when fetching distinct value to "
"populate the filter control component. Supports "
"jinja template syntax. Applies only when "
"`Enable Filter Select` is on."
),
"default_endpoint": _(
"Redirects to this endpoint when clicking on the table "
"from the table list"
),
"filter_select_enabled": _(
"Whether to populate the filter's dropdown in the explore "
"view's filter section with a list of distinct values fetched "
"from the backend on the fly"
),
"is_sqllab_view": _(
"Whether the table was generated by the 'Visualize' flow " "in SQL Lab"
),
"template_params": _(
"A set of parameters that become available in the query using "
"Jinja templating syntax"
),
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this table. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the database timeout if undefined."
),
"extra": utils.markdown(
"Extra data to specify table metadata. Currently supports "
'metadata of the format: `{ "certification": { "certified_by": '
'"Data Platform Team", "details": "This table is the source of truth." '
'}, "warning_markdown": "This is a warning." }`.',
True,
),
}
label_columns = {
"slices": _("Associated Charts"),
"link": _("Table"),
"changed_by_": _("Changed By"),
"database": _("Database"),
"database_name": _("Database"),
"changed_on_": _("Last Changed"),
"filter_select_enabled": _("Enable Filter Select"),
"schema": _("Schema"),
"default_endpoint": _("Default Endpoint"),
"offset": _("Offset"),
"cache_timeout": _("Cache Timeout"),
"table_name": _("Table Name"),
"fetch_values_predicate": _("Fetch Values Predicate"),
"owners": _("Owners"),
"main_dttm_col": _("Main Datetime Column"),
"description": _("Description"),
"is_sqllab_view": _("SQL Lab View"),
"template_params": _("Template parameters"),
"extra": _("Extra"),
"modified": _("Modified"),
}
edit_form_extra_fields = {
"database": QuerySelectField(
"Database",
query_factory=lambda: db.session.query(models.Database),
widget=Select2Widget(extra_classes="readonly"),
)
}
def pre_add(self, item: "TableModelView") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
validate_sqlatable(item)
def pre_update(self, item: "TableModelView") -> None:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]:
check_ownership(item)
def post_add( # pylint: disable=arguments-differ
self,
item: "TableModelView",
flash_message: bool = True,
fetch_metadata: bool = True,
) -> None:
if fetch_metadata:
item.fetch_metadata()
create_table_permissions(item)
if flash_message:
flash(
_(
"The table was created. "
"As part of this two-phase configuration "
"process, you should now click the edit button by "
"the new table to configure it."
),
"info",
)
def post_update(self, item: "TableModelView") -> None:
self.post_add(item, flash_message=False, fetch_metadata=False)
def _delete(self, pk: int) -> None:
DeleteMixin._delete(self, pk)
@expose("/edit/<pk>", methods=["GET", "POST"])
@has_access
def edit(self, pk: str) -> FlaskResponse:
"""Simple hack to redirect to explore view after saving"""
resp = super().edit(pk)
if isinstance(resp, str):
return resp
return redirect("/superset/explore/table/{}/".format(pk))
@action(
"refresh", __("Refresh Metadata"), __("Refresh column metadata"), "fa-refresh"
)
def refresh( # pylint: disable=no-self-use, too-many-branches
self, tables: Union["TableModelView", List["TableModelView"]]
) -> FlaskResponse:
logger.warning(
"This endpoint is deprecated and will be removed in version 2.0.0"
)
if not isinstance(tables, list):
tables = [tables]
@dataclass
class RefreshResults:
successes: List[TableModelView] = field(default_factory=list)
failures: List[TableModelView] = field(default_factory=list)
added: Dict[str, List[str]] = field(default_factory=dict)
removed: Dict[str, List[str]] = field(default_factory=dict)
modified: Dict[str, List[str]] = field(default_factory=dict)
results = RefreshResults()
for table_ in tables:
try:
metadata_results = table_.fetch_metadata()
if metadata_results.added:
results.added[table_.table_name] = metadata_results.added
if metadata_results.removed:
results.removed[table_.table_name] = metadata_results.removed
if metadata_results.modified:
results.modified[table_.table_name] = metadata_results.modified
results.successes.append(table_)
except Exception: # pylint: disable=broad-except
results.failures.append(table_)
if len(results.successes) > 0:
success_msg = _(
"Metadata refreshed for the following table(s): %(tables)s",
tables=", ".join([t.table_name for t in results.successes]),
)
flash(success_msg, "info")
if results.added:
added_tables = []
for table, cols in results.added.items():
added_tables.append(f"{table} ({', '.join(cols)})")
flash(
_(
"The following tables added new columns: %(tables)s",
tables=", ".join(added_tables),
),
"info",
)
if results.removed:
removed_tables = []
for table, cols in results.removed.items():
removed_tables.append(f"{table} ({', '.join(cols)})")
flash(
_(
"The following tables removed columns: %(tables)s",
tables=", ".join(removed_tables),
),
"info",
)
if results.modified:
modified_tables = []
for table, cols in results.modified.items():
modified_tables.append(f"{table} ({', '.join(cols)})")
flash(
_(
"The following tables update column metadata: %(tables)s",
tables=", ".join(modified_tables),
),
"info",
)
if len(results.failures) > 0:
failure_msg = _(
"Unable to refresh metadata for the following table(s): %(tables)s",
tables=", ".join([t.table_name for t in results.failures]),
)
flash(failure_msg, "danger")
return redirect("/tablemodelview/list/")
@expose("/list/")
@has_access
def list(self) -> FlaskResponse:
if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"):
return super().list()
return super().render_app_template()
|
"""
To run this flow:
```python forecasting_flow.py --environment=conda run```
"""
from functools import partial
from metaflow import (
Flow,
FlowSpec,
IncludeFile,
Parameter,
batch,
conda,
conda_base,
get_metadata,
parallel_map,
step,
)
from pip_decorator import pip
from forecasting_models import GluonTSModel, KatsModel, NeuralProphetModel, MerlionModel
# this version is used in pre and post processing steps
PANDAS_VERSION = "1.3.3"
# this version is used when conda packages aren't available
PIP_VERSION = "21.3.1"
def run_model(
model_config, wrapper_class, target_index, forecast_steps, train_df, data_freq
):
try:
model = wrapper_class(
model_config, target_index, forecast_steps, data_freq=data_freq
)
model.fit(train_df)
forecast = model.predict(train_df)
forecast["id"] = model_config["id"]
return forecast
    except Exception:
print(f"Error with {model_config}")
raise
@conda_base(python="3.8.12")
class ForecastingFlow(FlowSpec):
"""
A flow for benchmarking forecasting libraries.
"""
train_path = Parameter(
"train_path",
help="The path to a DataFrame file for training",
default="https://jgoode.s3.amazonaws.com/ts-datasets/seattle-trail.csv",
)
test_path = Parameter(
"test_path",
help="The path to a DataFrame file for testing",
default=None,
)
date_col = Parameter(
"date_col",
help="Column of the date in the input DataFrame",
default="Date",
)
target_col = Parameter(
"target_col",
help="Column of the target in the input DataFrame",
default="BGT North of NE 70th Total",
)
# data_config_path = Parameter(
# "data_config_path",
# help=
model_config_path = Parameter(
"model_config_path",
help="The path to a model config file",
default="../configs/forecasting/models/default.yaml",
)
forecast_steps = Parameter(
"forecast_steps",
help="The number of steps ahead to forecast",
default=10,
)
@conda(libraries={"pandas": PANDAS_VERSION, "pyyaml": "6.0"})
@step
def start(self):
"""
Start the flow by preprocessing the data.
"""
import pandas as pd
from pprint import pprint
import yaml
# Print the Metaflow metadata provider
print(f"Using metadata provider: {get_metadata()}")
def load_df(path):
df = pd.read_csv(path)
assert self.date_col in df.columns, '"%s" not in columns' % self.date_col
assert self.target_col in df.columns, (
'"%s" not in columns' % self.target_col
)
# parse date column and set it as the index
df[self.date_col] = pd.to_datetime(df[self.date_col])
df.set_index(self.date_col, inplace=True)
return df
self.train_df = load_df(self.train_path)
if self.test_path is not None:
self.test_df = load_df(self.test_path)
assert (
self.train_df.columns == self.test_df.columns
).all(), "Columns do not match"
else:
self.test_df = None
if self.test_df is None:
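            # No test set was provided: hold out the forecast horizon
            # immediately after the first n_train rows as an ad-hoc split.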
n_train = 500
self.test_df = self.train_df.iloc[n_train : n_train + self.forecast_steps]
self.train_df = self.train_df.iloc[:n_train]
# get index of the target column
self.target_index = self.train_df.columns.tolist().index(self.target_col)
# get the frequency of the data
self.freq = pd.infer_freq(self.train_df.index)
# load the model config file
with open(self.model_config_path, "r") as f:
self.model_config = yaml.safe_load(f)
print("train df")
print(self.train_df)
print("test df")
print(self.test_df)
print("model_config")
pprint(self.model_config)
# these branches will run in parallel
# TODO: skip those with no entries in the model config
self.next(
self.run_merlion,
self.run_gluonts,
self.run_kats,
self.run_neuralprophet,
)
@conda(libraries={"salesforce-merlion": "1.0.2"})
@step
def run_merlion(self):
"""
Run Merlion models.
https://github.com/salesforce/Merlion
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=MerlionModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("merlion", []),
)
self.next(self.join)
# We use pip because mxnet 1.5.0 is broken and there's no newer conda version.
@pip(libraries={"mxnet": "1.8.0.post0", "gluonts": "0.8.1"})
@conda(libraries={"pip": PIP_VERSION})
@step
def run_gluonts(self):
"""
Run gluon-ts models.
https://github.com/awslabs/gluon-ts
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=GluonTSModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("gluonts", []),
)
self.next(self.join)
@conda(libraries={"kats": "0.1.0"})
@step
def run_kats(self):
"""
Run Kats models.
https://github.com/facebookresearch/Kats
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=KatsModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("kats", []),
)
self.next(self.join)
# We use pip because there isn't a conda package for NeuralProphet.
@pip(libraries={"neuralprophet": "0.3.0"})
@conda(libraries={"pip": PIP_VERSION})
@step
def run_neuralprophet(self):
"""
Run NeuralProphet models.
https://github.com/ourownstory/neural_prophet
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=NeuralProphetModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("neuralprophet", []),
)
self.next(self.join)
@conda(libraries={"pandas": PANDAS_VERSION})
@step
def join(self, inputs):
"""
Compute performance metrics for each library.
"""
from collections import OrderedDict
import numpy as np
import pandas as pd
forecasts = OrderedDict()
# get forecasts for each library
for lib in inputs:
# carry these forward
self.train_df = lib.train_df
self.test_df = lib.test_df
self.target_index = lib.target_index
for forecast in lib.forecasts:
assert (
forecast["id"] not in forecasts
), f"Duplicate forecast id: {forecast["id"]}"
forecasts[forecast["id"]] = forecast["y_hat"].reshape(-1)
# get timestamps for the forecasts
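        # assumes a regularly spaced index: the step size is taken from the
        # gap between the first two training timestamps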
freq = self.train_df.index[1] - self.train_df.index[0]
future_dates = pd.DatetimeIndex(
[
self.train_df.index[-1] + (i + 1) * freq
for i in range(self.forecast_steps)
]
)
self.forecasts = pd.DataFrame(forecasts, index=future_dates)
print("forecasts:")
print(self.forecasts)
if self.test_df is not None:
# duplicate univariate target across columns for each model
true = self.test_df.iloc[
: self.forecast_steps, [self.target_index] * self.forecasts.shape[1]
]
pred = self.forecasts
print("--> true")
print(true)
print("--> pred")
print(pred)
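            # RMSE per model: sqrt(mean((pred - true)^2)) over the forecast
            # horizon, computed column-wise and sorted ascending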
self.rmse = pd.Series(
np.sqrt(np.mean((pred.values - true.values) ** 2, axis=0)),
index=self.forecasts.columns,
).sort_values()
print(f"RMSE:")
print(self.rmse)
self.next(self.end)
@step
def end(self):
"""
End of the flow
"""
pass
if __name__ == "__main__":
ForecastingFlow()
| """
To run this flow:
```python forecasting_flow.py --environment=conda run```
"""
from functools import partial
from metaflow import (
Flow,
FlowSpec,
IncludeFile,
Parameter,
batch,
conda,
conda_base,
get_metadata,
parallel_map,
step,
)
from pip_decorator import pip
from forecasting_models import GluonTSModel, KatsModel, NeuralProphetModel, MerlionModel
# this version is used in pre and post processing steps
PANDAS_VERSION = "1.3.3"
# this version is used when conda packages aren't available
PIP_VERSION = "21.3.1"
def run_model(
model_config, wrapper_class, target_index, forecast_steps, train_df, data_freq
):
try:
model = wrapper_class(
model_config, target_index, forecast_steps, data_freq=data_freq
)
model.fit(train_df)
forecast = model.predict(train_df)
forecast["id"] = model_config["id"]
return forecast
    except Exception:
print(f"Error with {model_config}")
raise
@conda_base(python="3.8.12")
class ForecastingFlow(FlowSpec):
"""
A flow for benchmarking forecasting libraries.
"""
train_path = Parameter(
"train_path",
help="The path to a DataFrame file for training",
default="https://jgoode.s3.amazonaws.com/ts-datasets/seattle-trail.csv",
)
test_path = Parameter(
"test_path",
help="The path to a DataFrame file for testing",
default=None,
)
date_col = Parameter(
"date_col",
help="Column of the date in the input DataFrame",
default="Date",
)
target_col = Parameter(
"target_col",
help="Column of the target in the input DataFrame",
default="BGT North of NE 70th Total",
)
# data_config_path = Parameter(
# "data_config_path",
# help=
model_config_path = Parameter(
"model_config_path",
help="The path to a model config file",
default="../configs/forecasting/models/default.yaml",
)
forecast_steps = Parameter(
"forecast_steps",
help="The number of steps ahead to forecast",
default=10,
)
@conda(libraries={"pandas": PANDAS_VERSION, "pyyaml": "6.0"})
@step
def start(self):
"""
Start the flow by preprocessing the data.
"""
import pandas as pd
from pprint import pprint
import yaml
# Print the Metaflow metadata provider
print(f"Using metadata provider: {get_metadata()}")
def load_df(path):
df = pd.read_csv(path)
assert self.date_col in df.columns, '"%s" not in columns' % self.date_col
assert self.target_col in df.columns, (
'"%s" not in columns' % self.target_col
)
# parse date column and set it as the index
df[self.date_col] = pd.to_datetime(df[self.date_col])
df.set_index(self.date_col, inplace=True)
return df
self.train_df = load_df(self.train_path)
if self.test_path is not None:
self.test_df = load_df(self.test_path)
assert (
self.train_df.columns == self.test_df.columns
).all(), "Columns do not match"
else:
self.test_df = None
if self.test_df is None:
n_train = 500
self.test_df = self.train_df.iloc[n_train : n_train + self.forecast_steps]
self.train_df = self.train_df.iloc[:n_train]
# get index of the target column
self.target_index = self.train_df.columns.tolist().index(self.target_col)
# get the frequency of the data
self.freq = pd.infer_freq(self.train_df.index)
# load the model config file
with open(self.model_config_path, "r") as f:
self.model_config = yaml.safe_load(f)
print("train df")
print(self.train_df)
print("test df")
print(self.test_df)
print("model_config")
pprint(self.model_config)
# these branches will run in parallel
# TODO: skip those with no entries in the model config
self.next(
self.run_merlion,
self.run_gluonts,
self.run_kats,
self.run_neuralprophet,
)
@conda(libraries={"salesforce-merlion": "1.0.2"})
@step
def run_merlion(self):
"""
Run Merlion models.
https://github.com/salesforce/Merlion
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=MerlionModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("merlion", []),
)
self.next(self.join)
# We use pip because mxnet 1.5.0 is broken and there's no newer conda version.
@pip(libraries={"mxnet": "1.8.0.post0", "gluonts": "0.8.1"})
@conda(libraries={"pip": PIP_VERSION})
@step
def run_gluonts(self):
"""
Run gluon-ts models.
https://github.com/awslabs/gluon-ts
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=GluonTSModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("gluonts", []),
)
self.next(self.join)
@conda(libraries={"kats": "0.1.0"})
@step
def run_kats(self):
"""
Run Kats models.
https://github.com/facebookresearch/Kats
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=KatsModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("kats", []),
)
self.next(self.join)
# We use pip because there isn't a conda package for NeuralProphet.
@pip(libraries={"neuralprophet": "0.3.0"})
@conda(libraries={"pip": PIP_VERSION})
@step
def run_neuralprophet(self):
"""
Run NeuralProphet models.
https://github.com/ourownstory/neural_prophet
"""
self.forecasts = parallel_map(
partial(
run_model,
wrapper_class=NeuralProphetModel,
target_index=self.target_index,
forecast_steps=self.forecast_steps,
train_df=self.train_df,
data_freq=self.freq,
),
self.model_config["libs"].get("neuralprophet", []),
)
self.next(self.join)
@conda(libraries={"pandas": PANDAS_VERSION})
@step
def join(self, inputs):
"""
Compute performance metrics for each library.
"""
from collections import OrderedDict
import numpy as np
import pandas as pd
forecasts = OrderedDict()
# get forecasts for each library
for lib in inputs:
# carry these forward
self.train_df = lib.train_df
self.test_df = lib.test_df
self.target_index = lib.target_index
for forecast in lib.forecasts:
assert (
forecast["id"] not in forecasts
), f"Duplicate forecast id: {forecast['id']}"
forecasts[forecast["id"]] = forecast["y_hat"].reshape(-1)
# get timestamps for the forecasts
freq = self.train_df.index[1] - self.train_df.index[0]
future_dates = pd.DatetimeIndex(
[
self.train_df.index[-1] + (i + 1) * freq
for i in range(self.forecast_steps)
]
)
self.forecasts = pd.DataFrame(forecasts, index=future_dates)
print("forecasts:")
print(self.forecasts)
if self.test_df is not None:
# duplicate univariate target across columns for each model
true = self.test_df.iloc[
: self.forecast_steps, [self.target_index] * self.forecasts.shape[1]
]
pred = self.forecasts
print("--> true")
print(true)
print("--> pred")
print(pred)
self.rmse = pd.Series(
np.sqrt(np.mean((pred.values - true.values) ** 2, axis=0)),
index=self.forecasts.columns,
).sort_values()
print(f"RMSE:")
print(self.rmse)
self.next(self.end)
@step
def end(self):
"""
End of the flow
"""
pass
if __name__ == "__main__":
ForecastingFlow()
|
#!/usr/bin/env python3
import feedparser
import argparse
import os
from os import path, getcwd
import xml.etree.ElementTree as ET
from io import IOBase
import json
import requests
import time
from configparser import ConfigParser
import re
import logging
# ----- ----- ----- ----- -----
class podqueue():
def __init__(self):
# Initialise to defaults before checking config file / CLI args
self.verbose = False
self.opml = None
self.dest = os.path.join(os.getcwd(), 'output')
self.time_format = '%Y-%m-%d'
self.log_file = 'podqueue.log'
self.feeds = []
self.FEED_FIELDS = ['title', 'link', 'description', 'published', 'image', 'categories',]
self.EPISODE_FIELDS = ['title', 'link', 'description', 'published_parsed', 'links',]
# If a config file exists, ingest it
self.check_config()
# Overwrite any config file defaults with CLI params
self.cli_args()
self.config_logging()
# Check an OPML was provided
try:
assert self.opml is not None
except Exception as e:
logging.error('OPML file or destination dir was not provided')
exit()
def config_logging(self):
# Always log to file; only stdout if -v
handlers = [logging.FileHandler(self.log_file)]
if (self.verbose): handlers.append(logging.StreamHandler())
# Config settings
level = logging.INFO if (self.verbose) else logging.WARNING
logging.basicConfig(level=level, datefmt='%Y-%m-%d %H:%M:%S', handlers=handlers,
format='%(asctime)s [%(levelname)s] %(message)s')
# Add header for append-mode file logging
logging.info('\n----- ----- ----- ----- -----\nInitialising\n----- ----- ----- ----- -----')
def ascii_normalise(self, input_str, ):
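    # Illustrative (assumed) example: 'My_Podcast:_2nd_Ed.' -> 'My_Podcast_2nd_Ed'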
try:
# Replace non-simple chars with dunders
input_str = re.sub(r'[^a-zA-Z0-9\-\_\/\\\.]', '_', input_str)
# Replace any strings of 2+ puncts with a single underscore
input_str = re.sub(r'_+', r'_', input_str)
input_str = re.sub(r'([^a-zA-Z0-9]{2,})', r'_', input_str)
# Remove any trailing puncts
input_str = re.sub(r'(_|\.)$', r'', input_str)
except Exception as e:
logging.error(f'\t\tError normalising file name: {e}')
exit()
return input_str
def check_config(self):
# get the path to podqueue.conf
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'podqueue.conf')
# Check if the file has been created
if not os.path.exists(config_path):
logging.info(f'Config file does not exist: {config_path}')
return None
conf = ConfigParser()
conf.read(config_path)
for key in ['opml', 'dest', 'time_format', 'verbose', 'log_file']:
if conf['podqueue'].get(key, None):
setattr(self, key, conf['podqueue'].get(key, None))
    # ConfigParser values are strings; convert 'verbose' back to a real bool.
    # Note bool('False') is True, so compare against truthy strings instead.
    if isinstance(self.verbose, str):
      self.verbose = self.verbose.strip().lower() in ('1', 'true', 'yes', 'on')
return
def cli_args(self):
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-o', '--opml', dest='opml', default=None, type=argparse.FileType('r'),
help='Pass an OPML file that contains a podcast subscription list.')
parser.add_argument('-d', '--dest', dest='dest', type=self.args_path,
help='The destination folder for downloads. Will be created if required, including sub-directories for each separate podcast.')
parser.add_argument('-t', '--time_format', dest='time_format',
      help='Specify a time format string for JSON files. Defaults to YYYY-MM-DD (e.g. 2022-06-30) if not specified.')
parser.add_argument('-v', '--verbose', default=False, action='store_true',
help='Prints additional debug information. If excluded, only errors are printed (for automation).')
parser.add_argument('-l', '--log_file', dest='log_file',
help='Specify a path to the log file. Defaults to ./podqueue.log')
# Save the CLI args to class vars - self.XXX
# vars() converts into a native dict
result = vars(parser.parse_args())
for key, value in result.items():
# Don't overwrite if it's not provided in CLI
if value is not None:
setattr(self, key, value)
def args_path(self, directory):
# Create the directory, if required
if not os.path.isdir(directory):
os.makedirs(directory)
return directory
def parse_opml(self, opml):
logging.info(f'Parsing OPML file: {opml.name}')
# Check if we have an actual file handle (CLI arg),
# Or a string path (config file), and we need to get our own handle
with (opml if isinstance(opml, IOBase) else open(opml, 'r')) as opml_f:
xml_root = ET.parse(opml_f).getroot()
# Get all RSS feeds with a 'xmlUrl' attribute
for feed in [x.attrib for x in xml_root.findall(".//outline[@type='rss']")]:
feed_url = feed.get('xmlUrl', None)
if feed_url:
self.feeds.append(feed_url)
def get_feeds(self, feeds):
logging.info(f'Fetching feeds:')
for feed in feeds:
try:
content = feedparser.parse(feed)
# The remote RSS server can close the HTTP connection
# except http.client.RemoteDisconnected:
      except Exception:
logging.warning(f'Feed server unexpectedly closed connection: {feed}')
continue
# If feedparser library reports bad XML, warn and skip
# Test str: 'http://feedparser.org/tests/illformed/rss/aaa_illformed.xml'
if content.get('bozo', False):
logging.warning(f'Feed is misformatted: {feed}')
continue
title = content.feed.get('title', 'Unknown Title')
logging.info(f'\tProcessing feed: {title}')
# Normalise the podcast name with no spaces or non-simple ascii
feed_dir_name = '_'.join([x for x in title.split(' ')])
feed_dir_name = self.ascii_normalise(feed_dir_name)
# Create the directory we need (no spaces) if it doesn't exist
directory = os.path.join(self.dest, feed_dir_name)
if not os.path.isdir(directory):
os.makedirs(directory)
# Also create the <<PODCAST>>/episodes subdirectory
if not os.path.isdir(os.path.join(directory, 'episodes')):
os.makedirs(os.path.join(directory, 'episodes'))
# Get content.feed metadata - podcast title, icon, description, etc.
# And write it to disk as <<PODCAST>>/<<PODCAST>>.json
feed_metadata = self.process_feed_metadata(content, directory)
# Also fetch the podcast logo, if available
if feed_metadata.get('image', None):
self.get_feed_image(feed_metadata['image'], directory)
# Then, process the episodes each and write to disk
for episode in content.entries:
episode_data = self.process_feed_episode(episode, directory)
return None
def process_feed_metadata(self, content, directory):
logging.info(f'\t\tProcessing feed metadata')
feed_metadata = {}
for field in self.FEED_FIELDS:
# .image is a dict structure where we only want href,
# the rest are strs, so special case
if (field == 'image') and (content.feed.get('image', None)):
value = content.feed.image.href
else:
value = content.feed.get(field, None)
feed_metadata[field] = value
# Additional calculated metadata based on structure:
feed_metadata['episode_count'] = len(content.entries)
metadata_filename = os.path.join(directory, f'{os.path.split(directory)[1]}.json')
with open(metadata_filename, 'w') as meta_f:
meta_f.write(json.dumps(feed_metadata))
return feed_metadata
def get_feed_image(self, image_url, directory):
try:
img = requests.get(image_url)
img.raise_for_status()
except Exception as e:
logging.warning(f'\t\tImage could not be found: {image_url}, for reason: {e}')
return
image_filename_ext = os.path.splitext(image_url)[1]
image_filename = os.path.join(directory, f'{os.path.split(directory)[1]}{image_filename_ext}')
with open(image_filename, 'wb') as img_f:
for chunk in img.iter_content(chunk_size=128):
img_f.write(chunk)
logging.info(f'\t\tAdded image to disk: {os.path.split(image_filename)[1]}')
return
def process_feed_episode(self, episode, directory):
episode_metadata = {}
for field in self.EPISODE_FIELDS:
episode_metadata[field] = episode.get(field, None)
# Change the time_struct tuple to a human string
if episode_metadata.get('published_parsed', None):
episode_metadata['published_parsed'] = time.strftime(self.time_format, \
episode_metadata['published_parsed'])
# Change the links{} into a single audio URL
if episode_metadata.get('links', None):
for link in episode_metadata['links']:
if link.get('type', None):
if 'audio' in link.get('type', None):
episode_metadata['link'] = link.get('href', None)
break
# Remove the old complicated links{}
episode_metadata.pop('links', None)
# Get a unique episode filename(s)
    episode_title = f'{episode_metadata["published_parsed"]}_{episode_metadata["title"]}'
# Special case - the final file name (not path) can't have a slash in it
    # Also replace colons as they are invalid in filenames on Windows (used for Alternate Data Streams on NTFS)
episode_title = re.sub(r'(\/|\\|:|\?|")', r'_', episode_title)
# Check the title isn't going to overshoot 255 bytes
# This is the limit in ZFS, BTRFS, ext*, NTFS, APFS, XFS, etc ...
# Otherwise, file.write will raise OSError 36 - "File name too long"
# I'm looking at you, Memory Palace 73. I mean really, 55 words and 316 characters long?
# https://thememorypalace.us/notes-on-an-imagined-plaque/
if len(episode_title) >= 250:
episode_title = f'{episode_title[0:245]}_'
episode_meta_filename = os.path.join(os.path.join(directory, 'episodes'), \
f'{episode_title}.json')
episode_audio_filename = os.path.join(os.path.join(directory, 'episodes'), \
f'{episode_title}.mp3')
# episode_meta_filename = self.ascii_normalise(episode_meta_filename)
# episode_audio_filename = self.ascii_normalise(episode_audio_filename)
# Check if the file already exists on disk (if so, skip)
if os.path.exists(episode_meta_filename) and os.path.exists(episode_audio_filename):
logging.info(f'\t\tEpisode already saved, skipping: {episode_title}')
return
# Write metadata to disk
with open(episode_meta_filename, 'w') as ep_meta_f:
ep_meta_f.write(json.dumps(episode_metadata))
logging.info(f'\t\t\tAdded episode metadata to disk: {episode_title}')
# Download the audio file
if episode_metadata.get('link', None):
try:
audio = requests.get(episode_metadata['link'])
audio.raise_for_status()
except Exception as e:
        logging.warning(f'\t\t\tAudio could not be found: {episode_metadata["link"]}')
return
# Write audio to disk
with open(episode_audio_filename, 'wb') as audio_f:
for chunk in audio.iter_content(chunk_size=1024*8):
audio_f.write(chunk)
logging.info(f'\t\t\tAdded episode audio to disk: {episode_title}')
return
# ----- ----- ----- ----- -----
def entry():
# Initialise the config - from file, or CLI args
pq = podqueue()
# Parse all feed URLs out of the OPML XML into pq.feeds=[]
pq.parse_opml(pq.opml)
# Download the metadata, images, and any missing episodes
pq.get_feeds(pq.feeds)
if __name__ == '__main__':
entry()
| #!/usr/bin/env python3
import feedparser
import argparse
import os
from os import path, getcwd
import xml.etree.ElementTree as ET
from io import IOBase
import json
import requests
import time
from configparser import ConfigParser
import re
import logging
# ----- ----- ----- ----- -----
class podqueue():
def __init__(self):
# Initialise to defaults before checking config file / CLI args
self.verbose = False
self.opml = None
self.dest = os.path.join(os.getcwd(), 'output')
self.time_format = '%Y-%m-%d'
self.log_file = 'podqueue.log'
self.feeds = []
self.FEED_FIELDS = ['title', 'link', 'description', 'published', 'image', 'categories',]
self.EPISODE_FIELDS = ['title', 'link', 'description', 'published_parsed', 'links',]
# If a config file exists, ingest it
self.check_config()
# Overwrite any config file defaults with CLI params
self.cli_args()
self.config_logging()
# Check an OPML was provided
try:
assert self.opml is not None
except Exception as e:
logging.error('OPML file or destination dir was not provided')
exit()
def config_logging(self):
# Always log to file; only stdout if -v
handlers = [logging.FileHandler(self.log_file)]
if (self.verbose): handlers.append(logging.StreamHandler())
# Config settings
level = logging.INFO if (self.verbose) else logging.WARNING
logging.basicConfig(level=level, datefmt='%Y-%m-%d %H:%M:%S', handlers=handlers,
format='%(asctime)s [%(levelname)s] %(message)s')
# Add header for append-mode file logging
logging.info('\n----- ----- ----- ----- -----\nInitialising\n----- ----- ----- ----- -----')
def ascii_normalise(self, input_str, ):
try:
# Replace non-simple chars with dunders
input_str = re.sub(r'[^a-zA-Z0-9\-\_\/\\\.]', '_', input_str)
# Replace any strings of 2+ puncts with a single underscore
input_str = re.sub(r'_+', r'_', input_str)
input_str = re.sub(r'([^a-zA-Z0-9]{2,})', r'_', input_str)
# Remove any trailing puncts
input_str = re.sub(r'(_|\.)$', r'', input_str)
except Exception as e:
logging.error(f'\t\tError normalising file name: {e}')
exit()
return input_str
def check_config(self):
# get the path to podqueue.conf
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'podqueue.conf')
# Check if the file has been created
if not os.path.exists(config_path):
logging.info(f'Config file does not exist: {config_path}')
return None
conf = ConfigParser()
conf.read(config_path)
for key in ['opml', 'dest', 'time_format', 'verbose', 'log_file']:
if conf['podqueue'].get(key, None):
setattr(self, key, conf['podqueue'].get(key, None))
    # ConfigParser values are strings; convert 'verbose' back to a real bool.
    # Note bool('False') is True, so compare against truthy strings instead.
    if isinstance(self.verbose, str):
      self.verbose = self.verbose.strip().lower() in ('1', 'true', 'yes', 'on')
return
def cli_args(self):
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-o', '--opml', dest='opml', default=None, type=argparse.FileType('r'),
help='Pass an OPML file that contains a podcast subscription list.')
parser.add_argument('-d', '--dest', dest='dest', type=self.args_path,
help='The destination folder for downloads. Will be created if required, including sub-directories for each separate podcast.')
parser.add_argument('-t', '--time_format', dest='time_format',
      help='Specify a time format string for JSON files. Defaults to YYYY-MM-DD (e.g. 2022-06-30) if not specified.')
parser.add_argument('-v', '--verbose', default=False, action='store_true',
help='Prints additional debug information. If excluded, only errors are printed (for automation).')
parser.add_argument('-l', '--log_file', dest='log_file',
help='Specify a path to the log file. Defaults to ./podqueue.log')
# Save the CLI args to class vars - self.XXX
# vars() converts into a native dict
result = vars(parser.parse_args())
for key, value in result.items():
# Don't overwrite if it's not provided in CLI
if value is not None:
setattr(self, key, value)
def args_path(self, directory):
# Create the directory, if required
if not os.path.isdir(directory):
os.makedirs(directory)
return directory
def parse_opml(self, opml):
logging.info(f'Parsing OPML file: {opml.name}')
# Check if we have an actual file handle (CLI arg),
# Or a string path (config file), and we need to get our own handle
with (opml if isinstance(opml, IOBase) else open(opml, 'r')) as opml_f:
xml_root = ET.parse(opml_f).getroot()
# Get all RSS feeds with a 'xmlUrl' attribute
for feed in [x.attrib for x in xml_root.findall(".//outline[@type='rss']")]:
feed_url = feed.get('xmlUrl', None)
if feed_url:
self.feeds.append(feed_url)
def get_feeds(self, feeds):
logging.info(f'Fetching feeds:')
for feed in feeds:
try:
content = feedparser.parse(feed)
# The remote RSS server can close the HTTP connection
# except http.client.RemoteDisconnected:
      except Exception:
logging.warning(f'Feed server unexpectedly closed connection: {feed}')
continue
# If feedparser library reports bad XML, warn and skip
# Test str: 'http://feedparser.org/tests/illformed/rss/aaa_illformed.xml'
if content.get('bozo', False):
logging.warning(f'Feed is misformatted: {feed}')
continue
title = content.feed.get('title', 'Unknown Title')
logging.info(f'\tProcessing feed: {title}')
# Normalise the podcast name with no spaces or non-simple ascii
feed_dir_name = '_'.join([x for x in title.split(' ')])
feed_dir_name = self.ascii_normalise(feed_dir_name)
# Create the directory we need (no spaces) if it doesn't exist
directory = os.path.join(self.dest, feed_dir_name)
if not os.path.isdir(directory):
os.makedirs(directory)
# Also create the <<PODCAST>>/episodes subdirectory
if not os.path.isdir(os.path.join(directory, 'episodes')):
os.makedirs(os.path.join(directory, 'episodes'))
# Get content.feed metadata - podcast title, icon, description, etc.
# And write it to disk as <<PODCAST>>/<<PODCAST>>.json
feed_metadata = self.process_feed_metadata(content, directory)
# Also fetch the podcast logo, if available
if feed_metadata.get('image', None):
self.get_feed_image(feed_metadata['image'], directory)
# Then, process the episodes each and write to disk
for episode in content.entries:
episode_data = self.process_feed_episode(episode, directory)
return None
def process_feed_metadata(self, content, directory):
logging.info(f'\t\tProcessing feed metadata')
feed_metadata = {}
for field in self.FEED_FIELDS:
# .image is a dict structure where we only want href,
# the rest are strs, so special case
if (field == 'image') and (content.feed.get('image', None)):
value = content.feed.image.href
else:
value = content.feed.get(field, None)
feed_metadata[field] = value
# Additional calculated metadata based on structure:
feed_metadata['episode_count'] = len(content.entries)
metadata_filename = os.path.join(directory, f'{os.path.split(directory)[1]}.json')
with open(metadata_filename, 'w') as meta_f:
meta_f.write(json.dumps(feed_metadata))
return feed_metadata
def get_feed_image(self, image_url, directory):
try:
img = requests.get(image_url)
img.raise_for_status()
except Exception as e:
logging.warning(f'\t\tImage could not be found: {image_url}, for reason: {e}')
return
image_filename_ext = os.path.splitext(image_url)[1]
image_filename = os.path.join(directory, f'{os.path.split(directory)[1]}{image_filename_ext}')
with open(image_filename, 'wb') as img_f:
for chunk in img.iter_content(chunk_size=128):
img_f.write(chunk)
logging.info(f'\t\tAdded image to disk: {os.path.split(image_filename)[1]}')
return
def process_feed_episode(self, episode, directory):
episode_metadata = {}
for field in self.EPISODE_FIELDS:
episode_metadata[field] = episode.get(field, None)
# Change the time_struct tuple to a human string
if episode_metadata.get('published_parsed', None):
episode_metadata['published_parsed'] = time.strftime(self.time_format, \
episode_metadata['published_parsed'])
# Change the links{} into a single audio URL
if episode_metadata.get('links', None):
for link in episode_metadata['links']:
if link.get('type', None):
if 'audio' in link.get('type', None):
episode_metadata['link'] = link.get('href', None)
break
# Remove the old complicated links{}
episode_metadata.pop('links', None)
# Get a unique episode filename(s)
episode_title = f'{episode_metadata["published_parsed"]}_{episode_metadata["title"]}'
# Special case - the final file name (not path) can't have a slash in it
    # Also replace colons as they are invalid in filenames on Windows (used for Alternate Data Streams on NTFS)
episode_title = re.sub(r'(\/|\\|:|\?|")', r'_', episode_title)
# Check the title isn't going to overshoot 255 bytes
# This is the limit in ZFS, BTRFS, ext*, NTFS, APFS, XFS, etc ...
# Otherwise, file.write will raise OSError 36 - "File name too long"
# I'm looking at you, Memory Palace 73. I mean really, 55 words and 316 characters long?
# https://thememorypalace.us/notes-on-an-imagined-plaque/
if len(episode_title) >= 250:
episode_title = f'{episode_title[0:245]}_'
episode_meta_filename = os.path.join(os.path.join(directory, 'episodes'), \
f'{episode_title}.json')
episode_audio_filename = os.path.join(os.path.join(directory, 'episodes'), \
f'{episode_title}.mp3')
# episode_meta_filename = self.ascii_normalise(episode_meta_filename)
# episode_audio_filename = self.ascii_normalise(episode_audio_filename)
# Check if the file already exists on disk (if so, skip)
if os.path.exists(episode_meta_filename) and os.path.exists(episode_audio_filename):
logging.info(f'\t\tEpisode already saved, skipping: {episode_title}')
return
# Write metadata to disk
with open(episode_meta_filename, 'w') as ep_meta_f:
ep_meta_f.write(json.dumps(episode_metadata))
logging.info(f'\t\t\tAdded episode metadata to disk: {episode_title}')
# Download the audio file
if episode_metadata.get('link', None):
try:
audio = requests.get(episode_metadata['link'])
audio.raise_for_status()
except Exception as e:
logging.warning(f'\t\t\tAudio could not be found: {episode_metadata["link"]}')
return
# Write audio to disk
with open(episode_audio_filename, 'wb') as audio_f:
for chunk in audio.iter_content(chunk_size=1024*8):
audio_f.write(chunk)
logging.info(f'\t\t\tAdded episode audio to disk: {episode_title}')
return
# ----- ----- ----- ----- -----
def entry():
# Initialise the config - from file, or CLI args
pq = podqueue()
# Parse all feed URLs out of the OPML XML into pq.feeds=[]
pq.parse_opml(pq.opml)
# Download the metadata, images, and any missing episodes
pq.get_feeds(pq.feeds)
if __name__ == '__main__':
entry()
|
# type: ignore
# flake8: noqa
"""Backport of Python3.7 dataclasses Library
Taken directly from here: https://github.com/ericvsmith/dataclasses
Licensed under the Apache License: https://github.com/ericvsmith/dataclasses/blob/master/LICENSE.txt
Needed due to isort's strict "no non-optional requirements" stance.
TODO: Remove once isort only supports 3.7+
"""
import copy
import inspect
import keyword
import re
import sys
import types
__all__ = [
"dataclass",
"field",
"Field",
"FrozenInstanceError",
"InitVar",
"MISSING",
# Helper functions.
"fields",
"asdict",
"astuple",
"make_dataclass",
"replace",
"is_dataclass",
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
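# Illustrative example (not part of the original module): with eq=True and
# frozen=True, the table above says __hash__ is generated, so instances can
# be used as dict keys:
#
#   @dataclass(frozen=True)
#   class Point:
#       x: int
#       y: int
#
#   d = {Point(1, 2): "origin-ish"}  # works because Point is hashable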
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError):
pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return "<factory>"
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE("_FIELD")
_FIELD_CLASSVAR = _FIELD_BASE("_FIELD_CLASSVAR")
_FIELD_INITVAR = _FIELD_BASE("_FIELD_INITVAR")
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = "__dataclass_fields__"
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = "__dataclass_params__"
# The name of the function that, if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = "__post_init__"
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r"^(?:\s*(\w+)\s*\.)?\s*(\w+)")
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = (
"name",
"type",
"default",
"default_factory",
"repr",
"hash",
"init",
"compare",
"metadata",
"_field_type", # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare, metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (
_EMPTY_METADATA
if metadata is None or len(metadata) == 0
else types.MappingProxyType(metadata)
)
self._field_type = None
def __repr__(self):
return (
"Field("
f"name={self.name!r},"
f"type={self.type!r},"
f"default={self.default!r},"
f"default_factory={self.default_factory!r},"
f"init={self.init!r},"
f"repr={self.repr!r},"
f"hash={self.hash!r},"
f"compare={self.compare!r},"
f"metadata={self.metadata!r},"
f"_field_type={self._field_type}"
")"
)
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
    # default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
#
# Note that in _process_class, this Field object is overwritten
# with the default value, so the end result is a descriptor that
# had __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), "__set_name__", None)
if func:
# There is a __set_name__ method on the descriptor, call
# it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ("init", "repr", "eq", "order", "unsafe_hash", "frozen")
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return (
"_DataclassParams("
f"init={self.init!r},"
f"repr={self.repr!r},"
f"eq={self.eq!r},"
f"order={self.order!r},"
f"unsafe_hash={self.unsafe_hash!r},"
f"frozen={self.frozen!r}"
")"
)
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(
*,
default=MISSING,
default_factory=MISSING,
init=True,
repr=True,
hash=None,
compare=True,
metadata=None,
):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError("cannot specify both default and default_factory")
return Field(default, default_factory, init, repr, hash, compare, metadata)
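# Editor's sketch (illustrative only, not part of the vendored backport):
# default_factory builds a fresh default per instance, and combining it
# with default= is rejected up front. Defined as a never-called helper.
def _field_sketch():
    @dataclass
    class Bag:
        items: list = field(default_factory=list)
    a, b = Bag(), Bag()
    a.items.append(1)
    assert b.items == []  # each instance received its own list
    try:
        field(default=[], default_factory=list)
    except ValueError:
        pass  # specifying both is an error, as documented above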
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return "()"
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
return_annotation = ""
if return_type is not MISSING:
locals["_return_type"] = return_type
return_annotation = "->_return_type"
args = ",".join(args)
body = "\n".join(f" {b}" for b in body)
# Compute the text of the entire function.
txt = f"def {name}({args}){return_annotation}:\n{body}"
exec(txt, globals, locals) # nosec
return locals[name]
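# Editor's sketch (illustrative only): _create_fn assembles function
# source text and exec()s it, returning the new function object. The
# call below compiles "def double(x):\n return x * 2" and returns it.
def _create_fn_sketch():
    double = _create_fn("double", ("x",), ["return x * 2"])
    assert double(21) == 42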
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
#
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f"object.__setattr__({self_name},{name!r},{value})"
return f"{self_name}.{name}={value}"
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f"_dflt_{f.name}"
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = f"{default_name}() " f"if {f.name} is _HAS_DEFAULT_FACTORY " f"else {f.name}"
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f"{default_name}()"
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type == _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
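# Editor's sketch (illustrative only): for a field with a default
# factory, _field_init emits an assignment that calls the factory only
# when the sentinel was passed, and stashes the factory in globals.
def _field_init_sketch():
    f = field(default_factory=list)
    f.name, f.type, f._field_type = "x", list, _FIELD
    g = {}
    line = _field_init(f, False, g, "self")
    assert line == "self.x=_dflt_x() if x is _HAS_DEFAULT_FACTORY else x"
    assert g["_dflt_x"] is list  # the factory was stored for the generated code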
def _init_param(f):
# Return the __init__ parameter string for this field. For
# example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just output the
# variable name and type.
default = ""
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look
# it up.
default = f"=_dflt_{f.name}"
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = "=_HAS_DEFAULT_FACTORY"
return f"{f.name}:_type_{f.name}{default}"
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f"non-default argument {f.name!r} " "follows default argument")
globals = {"MISSING": MISSING, "_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR)
body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})")
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ["pass"]
locals = {f"_type_{f.name}": f.type for f in fields}
return _create_fn(
"__init__",
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None,
)
def _repr_fn(fields):
return _create_fn(
"__repr__",
("self",),
[
'return self.__class__.__qualname__ + f"('
+ ", ".join([f"{f.name}={{self.{f.name}!r}}" for f in fields])
+ ')"'
],
)
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then
# the modified version is used in the second call. Is this okay?
globals = {"cls": cls, "FrozenInstanceError": FrozenInstanceError}
if fields:
fields_str = "(" + ",".join(repr(f.name) for f in fields) + ",)"
else:
# Special case for the zero-length tuple.
fields_str = "()"
return (
_create_fn(
"__setattr__",
("self", "name", "value"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f"super(cls, self).__setattr__(name, value)",
),
globals=globals,
),
_create_fn(
"__delattr__",
("self", "name"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f"super(cls, self).__delattr__(name)",
),
globals=globals,
),
)
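# Editor's sketch (illustrative only): the generated __setattr__ and
# __delattr__ guard every declared field, so mutating a frozen instance
# after construction raises FrozenInstanceError.
def _frozen_sketch():
    @dataclass(frozen=True)
    class F:
        x: int
    f = F(1)
    try:
        f.x = 2
    except FrozenInstanceError:
        pass  # assignment is blocked; f.x is still 1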
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(
name,
("self", "other"),
[
"if other.__class__ is self.__class__:",
f" return {self_tuple}{op}{other_tuple}",
"return NotImplemented",
],
)
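# Editor's sketch (illustrative only): the generated comparisons only
# apply between instances of the same class; for anything else they
# return NotImplemented so Python can try the reflected operation.
def _cmp_fn_sketch():
    lt = _cmp_fn("__lt__", "<", "(self.x,)", "(other.x,)")
    class P:  # hypothetical stand-in with an 'x' attribute
        def __init__(self, x):
            self.x = x
    P.__lt__ = lt
    assert P(1) < P(2)
    assert lt(P(1), object()) is NotImplemented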
def _hash_fn(fields):
self_tuple = _tuple_str("self", fields)
return _create_fn("__hash__", ("self",), [f"return hash({self_tuple})"])
def _is_classvar(a_type, typing):
# This test uses a typing internal class, but it's the best way to
# test if this is a ClassVar.
return type(a_type) is typing._ClassVar
def _is_initvar(a_type, dataclasses):
# The module we're checking against is the module we're
# currently in (dataclasses.py).
return a_type is dataclasses.InitVar
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
# Given a type annotation string, does it refer to a_type in
# a_module? For example, when checking that annotation denotes a
# ClassVar, then a_module is typing, and a_type is
# typing.ClassVar.
# It's possible to look up a_module given a_type, but it involves
# looking in sys.modules (again!), and seems like a waste since
# the caller already knows a_module.
# - annotation is a string type annotation
# - cls is the class that this annotation was found in
# - a_module is the module we want to match
# - a_type is the type in that module we want to match
# - is_type_predicate is a function called with (obj, a_module)
# that determines if obj is of the desired type.
# Since this test does not do a local namespace lookup (and
# instead only a module (global) lookup), there are some things it
# gets wrong.
# With string annotations, cv0 will be detected as a ClassVar:
# CV = ClassVar
# @dataclass
# class C0:
# cv0: CV
# But in this example cv1 will not be detected as a ClassVar:
# @dataclass
# class C1:
# CV = ClassVar
# cv1: CV
# In C1, the code in this function (_is_type) will look up "CV" in
# the module and not find it, so it will not consider cv1 as a
# ClassVar. This is a fairly obscure corner case, and the best
# way to fix it would be to eval() the string "CV" with the
# correct global and local namespaces. However that would involve
# an eval() penalty for every single field of every dataclass
# that's defined. It was judged not worth it.
match = _MODULE_IDENTIFIER_RE.match(annotation)
if match:
ns = None
module_name = match.group(1)
if not module_name:
# No module name, assume the class's module did
# "from dataclasses import InitVar".
ns = sys.modules.get(cls.__module__).__dict__
else:
# Look up module_name in the class's module.
module = sys.modules.get(cls.__module__)
if module and module.__dict__.get(module_name) is a_module:
ns = sys.modules.get(a_type.__module__).__dict__
if ns and is_type_predicate(ns.get(match.group(2)), a_module):
return True
return False
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's only a
# normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# Assume it's a normal field until proven otherwise. We're next
# going to decide if it's a ClassVar or InitVar; everything else
# is just a normal field.
f._field_type = _FIELD
# In addition to checking for actual types here, also check for
# string annotations. get_type_hints() won't always work for us
# (see https://github.com/python/typing/issues/508 for example),
# plus it's expensive and would require an eval for every string
# annotation. So, make a best effort to see if this is a ClassVar
# or InitVar using regex's and checking that the thing referenced
# is actually of the correct type.
# For the complete discussion, see https://bugs.python.org/issue33453
# If typing has not been imported, then it's impossible for any
# annotation to be a ClassVar. So, only look for ClassVar if
# typing has been imported by any module (not necessarily cls's
# module).
typing = sys.modules.get("typing")
if typing:
if _is_classvar(a_type, typing) or (
isinstance(f.type, str) and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar)
):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
# The module we're checking against is the module we're
# currently in (dataclasses.py).
dataclasses = sys.modules[__name__]
if _is_initvar(a_type, dataclasses) or (
isinstance(f.type, str)
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar)
):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f"field {f.name} cannot have a " "default factory")
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others. For
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(
f"mutable default {type(f.default)} for field "
f"{f.name} is not allowed: use default_factory"
)
return f
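# Editor's sketch (illustrative only): the mutable-default validation
# above fires at class-creation time, steering users to default_factory.
def _mutable_default_sketch():
    try:
        @dataclass
        class Bad:
            x: list = []  # would be shared across all instances
    except ValueError:
        pass  # rejected: use field(default_factory=list) instead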
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
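# Editor's sketch (illustrative only): because _set_new_attribute never
# overwrites an existing class attribute, a hand-written dunder wins
# over the generated one.
def _set_new_attribute_sketch():
    @dataclass
    class C:
        x: int
        def __repr__(self):
            return "custom"
    assert repr(C(1)) == "custom"  # the generated __repr__ was skipped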
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f"Cannot overwrite attribute __hash__ " f"in class {cls.__name__}")
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {
(False, False, False, False): None,
(False, False, False, True): None,
(False, False, True, False): None,
(False, False, True, True): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True): None,
(False, True, True, False): _hash_add,
(False, True, True, True): None,
(True, False, False, False): _hash_add,
(True, False, False, True): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True): _hash_exception,
}
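# Editor's sketch (illustrative only): two common rows of the table
# above. eq=True with frozen=False (the default) sets __hash__ to None,
# while eq=True with frozen=True generates a field-based __hash__.
def _hash_action_sketch():
    @dataclass
    class Mutable:
        x: int
    @dataclass(frozen=True)
    class Frozen:
        x: int
    assert Mutable.__hash__ is None  # unhashable by default
    assert hash(Frozen(1)) == hash(Frozen(1))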
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order, unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes. As long as
# we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
# added by this class.
#
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields. That's
# dealt with later.
cls_annotations = cls.__dict__.get("__annotations__", {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes) where
# we can.
cls_fields = [_get_field(cls, name, type) for name, type in cls_annotations.items()]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for this
# field) exists and is of type 'Field', replace it with the
# real default. This is so that normal class introspection
# sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
if isinstance(value, Field) and name not in cls_annotations:
raise TypeError(f"{name!r} is a field but has no type annotation")
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError("cannot inherit non-frozen dataclass from a " "frozen one")
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError("cannot inherit frozen dataclass from a " "non-frozen one")
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
# that such a __hash__ == None was not auto-generated, but it's
# close enough.
class_hash = cls.__dict__.get("__hash__", MISSING)
has_explicit_hash = not (
class_hash is MISSING or (class_hash is None and "__eq__" in cls.__dict__)
)
# If we're generating ordering methods, we must be generating the
# eq methods.
if order and not eq:
raise ValueError("eq must be true if order is true")
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values() if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(
cls,
"__init__",
_init_fn(
flds,
frozen,
has_post_init,
# The name to use for the "self"
# param in __init__. Use "self"
# if possible.
"__dataclass_self__" if "self" in fields else "self",
),
)
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, "__repr__", _repr_fn(flds))
if eq:
# Create __eq__ method. There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
_set_new_attribute(cls, "__eq__", _cmp_fn("__eq__", "==", self_tuple, other_tuple))
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
for name, op in [("__lt__", "<"), ("__le__", "<="), ("__gt__", ">"), ("__ge__", ">=")]:
if _set_new_attribute(cls, name, _cmp_fn(name, op, self_tuple, other_tuple)):
raise TypeError(
f"Cannot overwrite attribute {name} "
f"in class {cls.__name__}. Consider using "
"functools.total_ordering"
)
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(
f"Cannot overwrite attribute {fn.__name__} " f"in class {cls.__name__}"
)
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, "__doc__"):
# Create a class doc-string.
cls.__doc__ = cls.__name__ + str(inspect.signature(cls)).replace(" -> None", "")
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(
_cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False
):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
__hash__() method function is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
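# Editor's sketch (illustrative only): both decorator spellings work.
# Bare @dataclass passes the class as _cls; @dataclass(...) is called
# with _cls=None and returns wrap for the class that follows.
def _decorator_forms_sketch():
    @dataclass
    class A:
        x: int
    @dataclass(order=True)
    class B:
        x: int
    assert A(1) == A(1)
    assert B(1) < B(2)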
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory)) for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
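# Editor's sketch (illustrative only): asdict recurses through
# containers and deep-copies leaves, so the result is fully detached
# from the original instance.
def _asdict_sketch():
    @dataclass
    class Point:
        x: int
        y: int
    @dataclass
    class Path:
        points: list
    d = asdict(Path([Point(0, 0), Point(1, 1)]))
    assert d == {"points": [{"x": 0, "y": 0}, {"x": 1, "y": 1}]}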
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
def make_dataclass(
cls_name,
fields,
*,
bases=(),
namespace=None,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = "typing.Any"
elif len(item) == 2:
name, tp, = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f"Invalid field: {item!r}")
if not isinstance(name, str) or not name.isidentifier():
raise TypeError(f"Field names must be valid identifers: {name!r}")
if keyword.iskeyword(name):
raise TypeError(f"Field names must not be keywords: {name!r}")
if name in seen:
raise TypeError(f"Field name duplicated: {name!r}")
seen.add(name)
anns[name] = tp
namespace["__annotations__"] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
# of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(
cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
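# Editor's sketch (illustrative only): field specs may be a bare name,
# a (name, type) pair, or a (name, type, Field) triple, mirroring the
# docstring above.
def _make_dataclass_sketch():
    C = make_dataclass("C", ["x", ("y", int), ("z", int, field(default=0))])
    c = C(1, 2)
    assert (c.x, c.y, c.z) == (1, 2, 0)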
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(
f"field {f.name} is declared with "
"init=False, it cannot be specified with "
"replace()"
)
continue
if f.name not in changes:
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
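# Editor's sketch (illustrative only): replace() rebuilds the object
# through __init__, so init=False fields cannot be overridden there.
def _replace_sketch():
    @dataclass
    class C:
        x: int
        n: int = field(default=0, init=False)
    c = replace(C(1), x=2)
    assert (c.x, c.n) == (2, 0)
    try:
        replace(c, n=5)
    except ValueError:
        pass  # init=False fields are rejected by replace()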
| # type: ignore
# flake8: noqa
"""Backport of Python3.7 dataclasses Library
Taken directly from here: https://github.com/ericvsmith/dataclasses
Licensed under the Apache License: https://github.com/ericvsmith/dataclasses/blob/master/LICENSE.txt
Needed because of isort's strict stance against non-optional requirements.
TODO: Remove once isort supports only Python 3.7+
"""
import copy
import inspect
import keyword
import re
import sys
import types
__all__ = [
"dataclass",
"field",
"Field",
"FrozenInstanceError",
"InitVar",
"MISSING",
# Helper functions.
"fields",
"asdict",
"astuple",
"make_dataclass",
"replace",
"is_dataclass",
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError):
pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return "<factory>"
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE("_FIELD")
_FIELD_CLASSVAR = _FIELD_BASE("_FIELD_CLASSVAR")
_FIELD_INITVAR = _FIELD_BASE("_FIELD_INITVAR")
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = "__dataclass_fields__"
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = "__dataclass_params__"
# The name of the function that, if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = "__post_init__"
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# See https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r"^(?:\s*(\w+)\s*\.)?\s*(\w+)")
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = (
"name",
"type",
"default",
"default_factory",
"repr",
"hash",
"init",
"compare",
"metadata",
"_field_type", # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare, metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (
_EMPTY_METADATA
if metadata is None or len(metadata) == 0
else types.MappingProxyType(metadata)
)
self._field_type = None
def __repr__(self):
return (
"Field("
f"name={self.name!r},"
f"type={self.type!r},"
f"default={self.default!r},"
f"default_factory={self.default_factory!r},"
f"init={self.init!r},"
f"repr={self.repr!r},"
f"hash={self.hash!r},"
f"compare={self.compare!r},"
f"metadata={self.metadata!r},"
f"_field_type={self._field_type}"
")"
)
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
# default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
#
# Note that in _process_class, this Field object is overwritten
# with the default value, so the end result is a descriptor that
# had __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), "__set_name__", None)
if func:
# There is a __set_name__ method on the descriptor, call
# it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ("init", "repr", "eq", "order", "unsafe_hash", "frozen")
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return (
"_DataclassParams("
f"init={self.init!r},"
f"repr={self.repr!r},"
f"eq={self.eq!r},"
f"order={self.order!r},"
f"unsafe_hash={self.unsafe_hash!r},"
f"frozen={self.frozen!r}"
")"
)
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(
*,
default=MISSING,
default_factory=MISSING,
init=True,
repr=True,
hash=None,
compare=True,
metadata=None,
):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError("cannot specify both default and default_factory")
return Field(default, default_factory, init, repr, hash, compare, metadata)
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return "()"
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
return_annotation = ""
if return_type is not MISSING:
locals["_return_type"] = return_type
return_annotation = "->_return_type"
args = ",".join(args)
body = "\n".join(f" {b}" for b in body)
# Compute the text of the entire function.
txt = f"def {name}({args}){return_annotation}:\n{body}"
exec(txt, globals, locals) # nosec
return locals[name]
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
#
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f"object.__setattr__({self_name},{name!r},{value})"
return f"{self_name}.{name}={value}"
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f"_dflt_{f.name}"
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = f"{default_name}() " f"if {f.name} is _HAS_DEFAULT_FACTORY " f"else {f.name}"
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f"{default_name}()"
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type == _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
def _init_param(f):
# Return the __init__ parameter string for this field. For
# example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just output the
# variable name and type.
default = ""
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look
# it up.
default = f"=_dflt_{f.name}"
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = "=_HAS_DEFAULT_FACTORY"
return f"{f.name}:_type_{f.name}{default}"
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f"non-default argument {f.name!r} " "follows default argument")
globals = {"MISSING": MISSING, "_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR)
body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})")
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ["pass"]
locals = {f"_type_{f.name}": f.type for f in fields}
return _create_fn(
"__init__",
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None,
)
def _repr_fn(fields):
return _create_fn(
"__repr__",
("self",),
[
'return self.__class__.__qualname__ + f"('
+ ", ".join([f"{f.name}={{self.{f.name}!r}}" for f in fields])
+ ')"'
],
)
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then
# the modified version is used in the second call. Is this okay?
globals = {"cls": cls, "FrozenInstanceError": FrozenInstanceError}
if fields:
fields_str = "(" + ",".join(repr(f.name) for f in fields) + ",)"
else:
# Special case for the zero-length tuple.
fields_str = "()"
return (
_create_fn(
"__setattr__",
("self", "name", "value"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f"super(cls, self).__setattr__(name, value)",
),
globals=globals,
),
_create_fn(
"__delattr__",
("self", "name"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f"super(cls, self).__delattr__(name)",
),
globals=globals,
),
)
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(
name,
("self", "other"),
[
"if other.__class__ is self.__class__:",
f" return {self_tuple}{op}{other_tuple}",
"return NotImplemented",
],
)
def _hash_fn(fields):
self_tuple = _tuple_str("self", fields)
return _create_fn("__hash__", ("self",), [f"return hash({self_tuple})"])
def _is_classvar(a_type, typing):
# This test uses a typing internal class, but it's the best way to
# test if this is a ClassVar.
return type(a_type) is typing._ClassVar
def _is_initvar(a_type, dataclasses):
# The module we're checking against is the module we're
# currently in (dataclasses.py).
return a_type is dataclasses.InitVar
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
# Given a type annotation string, does it refer to a_type in
# a_module? For example, when checking that annotation denotes a
# ClassVar, then a_module is typing, and a_type is
# typing.ClassVar.
# It's possible to look up a_module given a_type, but it involves
# looking in sys.modules (again!), and seems like a waste since
# the caller already knows a_module.
# - annotation is a string type annotation
# - cls is the class that this annotation was found in
# - a_module is the module we want to match
# - a_type is the type in that module we want to match
# - is_type_predicate is a function called with (obj, a_module)
# that determines if obj is of the desired type.
# Since this test does not do a local namespace lookup (and
# instead only a module (global) lookup), there are some things it
# gets wrong.
# With string annotations, cv0 will be detected as a ClassVar:
# CV = ClassVar
# @dataclass
# class C0:
# cv0: CV
# But in this example cv1 will not be detected as a ClassVar:
# @dataclass
# class C1:
# CV = ClassVar
# cv1: CV
# In C1, the code in this function (_is_type) will look up "CV" in
# the module and not find it, so it will not consider cv1 as a
# ClassVar. This is a fairly obscure corner case, and the best
# way to fix it would be to eval() the string "CV" with the
# correct global and local namespaces. However that would involve
# an eval() penalty for every single field of every dataclass
# that's defined. It was judged not worth it.
match = _MODULE_IDENTIFIER_RE.match(annotation)
if match:
ns = None
module_name = match.group(1)
if not module_name:
# No module name, assume the class's module did
# "from dataclasses import InitVar".
ns = sys.modules.get(cls.__module__).__dict__
else:
# Look up module_name in the class's module.
module = sys.modules.get(cls.__module__)
if module and module.__dict__.get(module_name) is a_module:
ns = sys.modules.get(a_type.__module__).__dict__
if ns and is_type_predicate(ns.get(match.group(2)), a_module):
return True
return False
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's only a
# normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# Assume it's a normal field until proven otherwise. We're next
# going to decide if it's a ClassVar or InitVar; everything else
# is just a normal field.
f._field_type = _FIELD
# In addition to checking for actual types here, also check for
# string annotations. get_type_hints() won't always work for us
# (see https://github.com/python/typing/issues/508 for example),
# plus it's expensive and would require an eval for every string
# annotation. So, make a best effort to see if this is a ClassVar
# or InitVar using regex's and checking that the thing referenced
# is actually of the correct type.
# For the complete discussion, see https://bugs.python.org/issue33453
# If typing has not been imported, then it's impossible for any
# annotation to be a ClassVar. So, only look for ClassVar if
# typing has been imported by any module (not necessarily cls's
# module).
typing = sys.modules.get("typing")
if typing:
if _is_classvar(a_type, typing) or (
isinstance(f.type, str) and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar)
):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
# The module we're checking against is the module we're
# currently in (dataclasses.py).
dataclasses = sys.modules[__name__]
if _is_initvar(a_type, dataclasses) or (
isinstance(f.type, str)
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar)
):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f"field {f.name} cannot have a " "default factory")
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others. For
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(
f"mutable default {type(f.default)} for field "
f"{f.name} is not allowed: use default_factory"
)
return f
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f"Cannot overwrite attribute __hash__ " f"in class {cls.__name__}")
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {
(False, False, False, False): None,
(False, False, False, True): None,
(False, False, True, False): None,
(False, False, True, True): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True): None,
(False, True, True, False): _hash_add,
(False, True, True, True): None,
(True, False, False, False): _hash_add,
(True, False, False, True): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True): _hash_exception,
}
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order, unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes. As long as
# we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
# added by this class.
#
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields. That's
# dealt with later.
cls_annotations = cls.__dict__.get("__annotations__", {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes) where
# we can.
cls_fields = [_get_field(cls, name, type) for name, type in cls_annotations.items()]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for this
# field) exists and is of type 'Field', replace it with the
# real default. This is so that normal class introspection
# sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
if isinstance(value, Field) and name not in cls_annotations:
raise TypeError(f"{name!r} is a field but has no type annotation")
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError("cannot inherit non-frozen dataclass from a " "frozen one")
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError("cannot inherit frozen dataclass from a " "non-frozen one")
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
# that such a __hash__ == None was not auto-generated, but it's
# close enough.
class_hash = cls.__dict__.get("__hash__", MISSING)
has_explicit_hash = not (
class_hash is MISSING or (class_hash is None and "__eq__" in cls.__dict__)
)
# If we're generating ordering methods, we must be generating the
# eq methods.
if order and not eq:
raise ValueError("eq must be true if order is true")
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values() if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(
cls,
"__init__",
_init_fn(
flds,
frozen,
has_post_init,
# The name to use for the "self"
# param in __init__. Use "self"
# if possible.
"__dataclass_self__" if "self" in fields else "self",
),
)
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, "__repr__", _repr_fn(flds))
if eq:
# Create __eq__ method. There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
_set_new_attribute(cls, "__eq__", _cmp_fn("__eq__", "==", self_tuple, other_tuple))
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
for name, op in [("__lt__", "<"), ("__le__", "<="), ("__gt__", ">"), ("__ge__", ">=")]:
if _set_new_attribute(cls, name, _cmp_fn(name, op, self_tuple, other_tuple)):
raise TypeError(
f"Cannot overwrite attribute {name} "
f"in class {cls.__name__}. Consider using "
"functools.total_ordering"
)
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(
f"Cannot overwrite attribute {fn.__name__} " f"in class {cls.__name__}"
)
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, "__doc__"):
# Create a class doc-string.
cls.__doc__ = cls.__name__ + str(inspect.signature(cls)).replace(" -> None", "")
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(
_cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False
):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
__hash__() method function is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
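# Editor's note: a minimal, hedged usage sketch of the decorator defined above,
# illustrative only. `_demo_dataclass_usage` and `_DemoPoint` are hypothetical
# names that are not part of this module.
def _demo_dataclass_usage():
    @dataclass(order=True)  # works bare (@dataclass) or with parens
    class _DemoPoint:
        x: int
        y: int = 0
    p, q = _DemoPoint(1), _DemoPoint(1, 2)
    assert p == _DemoPoint(1, 0)  # generated __init__ and __eq__
    assert p < q                  # order=True adds __lt__/__le__/__gt__/__ge__
    assert _DemoPoint(2) > q      # comparisons use the (x, y) field tuples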
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
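# Editor's note: a hedged sketch of the two introspection helpers above:
# is_dataclass() accepts both the class and its instances, while fields()
# returns only real fields, in definition order. `_DemoC` is hypothetical.
def _demo_introspection():
    @dataclass
    class _DemoC:
        x: int
        y: str = "hi"
    c = _DemoC(1)
    assert is_dataclass(_DemoC) and is_dataclass(c)
    assert not _is_dataclass_instance(_DemoC)          # a class is not an instance
    assert [f.name for f in fields(c)] == ["x", "y"]   # tuple of Field objects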
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory)) for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
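# Editor's note: a hedged sketch of the recursion implemented by _asdict_inner()
# above, which the asdict() docstring example doesn't show: dataclass instances
# nested inside containers are converted too. `_Demo*` names are hypothetical.
def _demo_asdict_nested():
    @dataclass
    class _DemoChild:
        n: int
    @dataclass
    class _DemoParent:
        kids: list
    p = _DemoParent(kids=[_DemoChild(1), _DemoChild(2)])
    assert asdict(p) == {"kids": [{"n": 1}, {"n": 2}]}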
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
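# Editor's note: astuple() mirrors asdict() above; a short hedged check that
# containers pass through with their type preserved. `_DemoPair` is hypothetical.
def _demo_astuple_nested():
    @dataclass
    class _DemoPair:
        a: int
        b: tuple
    assert astuple(_DemoPair(1, (2, 3))) == (1, (2, 3))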
def make_dataclass(
cls_name,
fields,
*,
bases=(),
namespace=None,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = "typing.Any"
elif len(item) == 2:
name, tp = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f"Invalid field: {item!r}")
if not isinstance(name, str) or not name.isidentifier():
raise TypeError(f"Field names must be valid identifers: {name!r}")
if keyword.iskeyword(name):
raise TypeError(f"Field names must not be keywords: {name!r}")
if name in seen:
raise TypeError(f"Field name duplicated: {name!r}")
seen.add(name)
anns[name] = tp
namespace["__annotations__"] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
# of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(
cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
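# Editor's note: a hedged sketch of make_dataclass() above, exercising all three
# accepted field forms at runtime. It assumes this module's field() helper
# mirrors the stdlib signature; `_DemoD` is a hypothetical name.
def _demo_make_dataclass():
    _DemoD = make_dataclass("_DemoD", ["x", ("y", int), ("z", int, field(init=False, default=0))])
    d = _DemoD(1, 2)                  # z is excluded from __init__ (init=False)
    assert (d.x, d.y, d.z) == (1, 2, 0)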
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(
f"field {f.name} is declared with "
"init=False, it cannot be specified with "
"replace()"
)
continue
if f.name not in changes:
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
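# Editor's note: a hedged sketch of replace() above, including the init=False
# restriction it enforces. It assumes the module's field() helper mirrors the
# stdlib signature; the `_Demo*` names are hypothetical.
def _demo_replace():
    @dataclass(frozen=True)
    class _DemoR:
        x: int
        y: int = 0
    r2 = replace(_DemoR(1, 2), x=5)
    assert (r2.x, r2.y) == (5, 2)      # unspecified fields are copied over
    @dataclass
    class _DemoNoInit:
        x: int = field(default=0, init=False)
    try:
        replace(_DemoNoInit(), x=1)    # init=False fields can't be replaced
    except ValueError:
        pass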
|
import discord
from discord import member
from discord.ext import commands
from mysqldb import *
import asyncio
from extra.useful_variables import list_of_commands
from extra.prompt.menu import Confirm
from extra.view import ReportSupportView
from typing import List, Dict, Optional
import os
case_cat_id = int(os.getenv('CASE_CAT_ID'))
reportsupport_channel_id = int(os.getenv('REPORT_CHANNEL_ID'))
dnk_id = int(os.getenv('DNK_ID'))
moderator_role_id = int(os.getenv('MOD_ROLE_ID'))
admin_role_id = int(os.getenv('ADMIN_ROLE_ID'))
lesson_management_role_id = int(os.getenv('LESSON_MANAGEMENT_ROLE_ID'))
staff_vc_id = int(os.getenv('STAFF_VC_ID'))
allowed_roles = [
int(os.getenv('OWNER_ROLE_ID')), admin_role_id,
moderator_role_id]
from extra.reportsupport.applications import ApplicationsTable
from extra.reportsupport.verify import Verify
from extra.reportsupport.openchannels import OpenChannels
report_support_classes: List[commands.Cog] = [
ApplicationsTable, Verify, OpenChannels
]
class ReportSupport(*report_support_classes):
""" A cog related to the system of reports and some other things. """
def __init__(self, client) -> None:
# super(ReportSupport, self).__init__(client)
self.client = client
self.cosmos_id: int = int(os.getenv('COSMOS_ID'))
self.muffin_id: int = int(os.getenv('MUFFIN_ID'))
self.cache = {}
self.report_cache = {}
@commands.Cog.listener()
async def on_ready(self) -> None:
self.client.add_view(view=ReportSupportView(self.client))
print('ReportSupport cog is online!')
async def handle_application(self, guild, payload) -> None:
""" Handles teacher applications.
:param guild: The server in which the application is running.
:param payload: Data about the Staff member who is opening the application. """
emoji = str(payload.emoji)
if emoji == '✅':
# Gets the teacher app and does the magic
if not (app := await self.get_application_by_message(payload.message_id)):
return
# Checks that the person doesn't already have an open interview channel
if not app[3]:
# Creates an interview room with the teacher and sends their application there (you can z!close there)
return await self.create_interview_room(guild, app)
elif emoji == '❌':
# Tries to delete the teacher app from the db, in case it is registered
app = await self.get_application_by_message(payload.message_id)
if app and not app[3]:
await self.delete_application(payload.message_id)
interview_info = self.interview_info[app[2]]
app_channel = self.client.get_channel(interview_info['app'])
app_msg = await app_channel.fetch_message(payload.message_id)
await app_msg.add_reaction('🔏')
if applicant := discord.utils.get(guild.members, id=app[1]):
return await applicant.send(embed=discord.Embed(description=interview_info['message']))
async def send_teacher_application(self, member) -> None:
""" Sends a teacher application form to the user.
:param member: The member to send the application to. """
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
def check_reaction(r, u):
return u.id == member.id and not r.message.guild and str(r.emoji) in ['✅', '❌']
terms_embed = discord.Embed(
title="Terms of Application",
description="""Hello there!
Thank you for applying for teaching here,
Before you can formally start applying to teach in The Language Sloth, there are a couple things we would like you to know. The Language Sloth is a free of charge language learning platform which is meant to be accessible and open for anyone who is interested in languages from any background. We do not charge for any kind of service, nor do we pay for any services for starting teachers. We are a community that shares the same interest: Languages.
We do not require professional teaching skills; anyone can teach their native language. However, we have a set number of requirements for our teachers
Entry requirements:
》Must be at least 16 years of age
》Must have at least a conversational level of English
》Must have clear microphone audio
》Must commit 40 minutes per week
》Must prepare their own material weekly
``` ✅ To agree with our terms```""",
color=member.color
)
terms = await member.send(embed=terms_embed)
await terms.add_reaction('✅')
await terms.add_reaction('❌')
# Waits for reaction confirmation to the terms of application
terms_r = await self.get_reaction(member, check_reaction)
if terms_r is None:
self.cache[member.id] = 0
return
if terms_r != '✅':
self.cache[member.id] = 0
return await member.send(f"**Thank you anyways, bye!**")
embed = discord.Embed(title=f"__Teacher Application__")
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.description = '''
- Hello there, you've reacted to apply to become a teacher.
To apply, please answer the following questions with one message at a time.
Question one:
What language are you applying to teach?'''
q1 = await member.send(embed=embed)
a1 = await self.get_message_content(member, msg_check)
if not a1:
return
embed.description = '''
- Why do you want to teach that language on the language sloth?
Please answer with one message.'''
q2 = await member.send(embed=embed)
a2 = await self.get_message_content(member, msg_check)
if not a2:
return
embed.description = '''
- On The Language Sloth, our classes happen once a week at the same time weekly.
Please let us know when would be the best time for you to teach,
E.g.: Thursdays 3 pm CET; you can specify your timezone.
Again remember to answer with one message.'''
q3 = await member.send(embed=embed)
a3 = await self.get_message_content(member, msg_check)
if not a3:
return
embed.description = '''
- Let's talk about your English level, how do you consider your English level?
Are you able to teach lessons in English?
Please answer using one message only'''
q4 = await member.send(embed=embed)
a4 = await self.get_message_content(member, msg_check)
if not a4:
return
embed.description = '''- Have you ever taught people before?'''
q5 = await member.send(embed=embed)
a5 = await self.get_message_content(member, msg_check)
if not a5:
return
embed.description = '''- Inform a short description for your class.'''
q6 = await member.send(embed=embed)
a6 = await self.get_message_content(member, msg_check)
if not a6:
return
embed.description = '''- How old are you?'''
q7 = await member.send(embed=embed)
a7 = await self.get_message_content(member, msg_check)
if not a7:
return
# Get user's native roles
user_native_roles = []
for role in member.roles:
if str(role.name).lower().startswith('native'):
user_native_roles.append(role.name.title())
# Application result
app = f"""```ini\n[Username]: {member} ({member.id})\n[Joined the server]: {member.joined_at.strftime("%a, %d %B %y, %I %M %p UTC")}\n[Applying to teach]: {a1.title()}\n[Native roles]: {", ".join(user_native_roles)}\n[Motivation for teaching]: {a2.capitalize()}\n[Applying to teach on]: {a3.upper()}\n[English level]: {a4.capitalize()}\n[Experience teaching]: {a5.capitalize()}\n[Description]:{a6.capitalize()}\n[Age]: {a7}```"""
await member.send(app)
embed.description = '''
Are you sure you want to send this application? :white_check_mark: to send and :x: to cancel
'''
app_conf = await member.send(embed=embed)
await app_conf.add_reaction('✅')
await app_conf.add_reaction('❌')
# Waits for reaction confirmation
r = await self.get_reaction(member, check_reaction)
if r is None:
return
if r == '✅':
embed.description = "**Application successfully made, please, be patient now!**"
await member.send(embed=embed)
teacher_app_channel = await self.client.fetch_channel(self.teacher_app_channel_id)
muffin = discord.utils.get(teacher_app_channel.guild.members, id=self.muffin_id)
app = await teacher_app_channel.send(content=f"{muffin.mention}, {member.mention}\n{app}")
await app.add_reaction('✅')
await app.add_reaction('❌')
# Saves in the database
await self.insert_application(app.id, member.id, 'teacher')
else:
self.cache[member.id] = 0
return await member.send("**Thank you anyways!**")
async def send_moderator_application(self, member):
""" Sends a moderator application form to the user.
:param member: The member to send the application to. """
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
def check_reaction(r, u):
return u.id == member.id and not r.message.guild and str(r.emoji) in ['✅', '❌']
terms_embed = discord.Embed(
title="Terms of Application",
description="""Hello there!
Before you can formally start applying for Staff in The Language Sloth, there are a couple of requirements we feel are necessary:
Entry requirements:
```》Must be at least 18 years of age
》Must have at least a conversational level of English
》Must be an active member
》Be a member of the server for at least a month
》Must not have any warnings in the past 3 months.```""",
color=member.color
)
terms = await member.send(embed=terms_embed)
await terms.add_reaction('✅')
await terms.add_reaction('❌')
# Waits for reaction confirmation to the terms of application
terms_r = await self.get_reaction(member, check_reaction)
if terms_r is None:
self.cache[member.id] = 0
return
if terms_r != '✅':
self.cache[member.id] = 0
return await member.send(f"**Thanks anyways, bye!**")
embed = discord.Embed(title=f"__Moderator Application__")
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.description = "- What's your age?"
await member.send(embed=embed)
a1 = await self.get_message_content(member, msg_check)
if not a1: return
embed.description = """
- Hello, there you've reacted to apply to become a moderator.
To apply please answer to these following questions with One message at a time
Question one:
Do you have any experience moderating Discord servers?"""
q2 = await member.send(embed=embed)
a2 = await self.get_message_content(member, msg_check)
if not a2: return
embed.description = """
- What is your gender?
Please answer with one message."""
await member.send(embed=embed)
a3 = await self.get_message_content(member, msg_check)
if not a3: return
embed.description = """
- What's your English level? Are you able to express yourself using English?
Please answer using one message only."""
await member.send(embed=embed)
a4 = await self.get_message_content(member, msg_check)
if not a4: return
embed.description = """
- Why are you applying to be Staff? What is your motivation?
Please answer using one message only."""
await member.send(embed=embed)
a5 = await self.get_message_content(member, msg_check)
if not a5: return
embed.description = """- How do you think The Language Sloth could be a better community?
Please answer using one message only."""
await member.send(embed=embed)
a6 = await self.get_message_content(member, msg_check)
if not a6: return
embed.description = """- How active are you on Discord in general?
Please answer using one message only."""
await member.send(embed=embed)
a7 = await self.get_message_content(member, msg_check)
if not a7: return
embed.description = """- What is your time zone?
Please answer using one message only.
await member.send(embed=embed)
a8 = await self.get_message_content(member, msg_check)
if not a8: return
embed.description = "- What is your country of origin?"
await member.send(embed=embed)
a9 = await self.get_message_content(member, msg_check)
if not a9: return
embed.description = "- Tell us about yourself?"
await member.send(embed=embed)
a10 = await self.get_message_content(member, msg_check)
if not a10: return
# Get user's native roles
user_native_roles = []
for role in member.roles:
if str(role.name).lower().startswith('native'):
user_native_roles.append(role.name.title())
# Application result
app = f"""```ini\n[Username]: {member} ({member.id})
[Joined the server]: {member.joined_at.strftime("%a, %d %B %y, %I %M %p UTC")}
[Native roles]: {', '.join(user_native_roles)}
[Age]: {a1}
[Experience moderating]: {a2.capitalize()}
[Gender]: {a3.title()}
[English level]: {a4.capitalize()}
[Reason & Motivation]: {a5.capitalize()}
[How we can improve Sloth]: {a6.capitalize()}
[Activity Status]: {a7.capitalize()}
[Timezone]: {a8.title()}
[Origin Country]: {a9.title()}
[About]: {a10.capitalize()}```"""
await member.send(app)
embed.description = """
Are you sure you want to apply this? :white_check_mark: to send and :x: to Cancel
"""
app_conf = await member.send(embed=embed)
await app_conf.add_reaction('✅')
await app_conf.add_reaction('❌')
# Waits for reaction confirmation
r = await self.get_reaction(member, check_reaction)
if r is None:
return
if r == '✅':
# ""
embed.description = """**Application successfully made, please, be patient now.**
We will let you know when we need a new mod. We check apps when we need it!"""
await member.send(embed=embed)
moderator_app_channel = await self.client.fetch_channel(self.moderator_app_channel_id)
cosmos = discord.utils.get(moderator_app_channel.guild.members, id=self.cosmos_id)
app = await moderator_app_channel.send(content=f"{cosmos.mention}, {member.mention}\n{app}")
await app.add_reaction('✅')
await app.add_reaction('❌')
# Saves in the database
await self.insert_application(app.id, member.id, 'moderator')
else:
self.cache[member.id] = 0
return await member.send("**Thank you anyways!**")
async def send_event_manager_application(self, member):
""" Sends a event manager application form to the user.
:param member: The member to send the application to. """
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
def check_reaction(r, u):
return u.id == member.id and not r.message.guild and str(r.emoji) in ['✅', '❌']
terms_embed = discord.Embed(
title="Terms of Application",
description="""Hello there!
Thank you for applying for hosting events here,
Before you can formally start applying to host events in The Language Sloth, there are a couple things we would like you to know. The Language Sloth is a free of charge language learning platform which is meant to be accessible and open for anyone who is interested in languages from any background. We do not charge for any kind of service, nor do we pay for any services for hosting events. We are a community that shares the same interest: Languages.
We do not require professional skills. However, we have a set number of requirements for our event managers
Entry requirements:
》Must be at least 16 years of age
》Must have at least a conversational level of English
》Must have clear microphone audio
》Must prepare their own material weekly
``` ✅ To agree with our terms```""",
color=member.color
)
terms = await member.send(embed=terms_embed)
await terms.add_reaction('✅')
await terms.add_reaction('❌')
# Waits for reaction confirmation to the terms of application
terms_r = await self.get_reaction(member, check_reaction)
if terms_r is None:
self.cache[member.id] = 0
return
if terms_r != '✅':
self.cache[member.id] = 0
return await member.send(f"**Thank you anyways, bye!**")
embed = discord.Embed(title=f"__Teacher Application__")
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.title = "Event manager Application"
embed.description = '''
- Hello there, you've reacted to apply to become an event manager.
To apply, please answer the following questions with one message at a time.
Question one:
What is your event called?'''
q1 = await member.send(embed=embed)
a1 = await self.get_message_content(member, msg_check)
if not a1:
return
embed.description = '''
- Why do you want to host that event on the language sloth?
Please answer with one message.'''
q2 = await member.send(embed=embed)
a2 = await self.get_message_content(member, msg_check)
if not a2:
return
embed.description = '''
- Please let us know when would be the best time for you to host events
E.g.: Thursdays 3 pm CET; you can specify your timezone.
Again remember to answer with one message.'''
q3 = await member.send(embed=embed)
a3 = await self.get_message_content(member, msg_check)
if not a3:
return
embed.description = """
- Let's talk about your English level, how do you consider your English level?
Are you able to host events in English? If not, in which language would you be hosting?
Please answer using one message only"""
q4 = await member.send(embed=embed)
a4 = await self.get_message_content(member, msg_check)
if not a4:
return
embed.description = "- Have you ever hosted events before? If yes, please describe!"
q5 = await member.send(embed=embed)
a5 = await self.get_message_content(member, msg_check)
if not a5:
return
embed.description = "- Inform a short description for your event/events."
q6 = await member.send(embed=embed)
a6 = await self.get_message_content(member, msg_check)
if not a6:
return
embed.description = "- How old are you?"
q7 = await member.send(embed=embed)
a7 = await self.get_message_content(member, msg_check)
if not a7:
return
# Get user's native roles
user_native_roles = []
for role in member.roles:
if str(role.name).lower().startswith('native'):
user_native_roles.append(role.name.title())
# Application result
app = f"""```ini\n[Username]: {member} ({member.id})\n[Joined the server]: {member.joined_at.strftime("%a, %d %B %y, %I %M %p UTC")}\n[Applying to host]: {a1.title()}\n[Native roles]: {", ".join(user_native_roles)}\n[Motivation for hosting]: {a2.capitalize()}\n[Applying to host on]: {a3.upper()}\n[English level]: {a4.capitalize()}\n[Experience hosting]: {a5.capitalize()}\n[Description]:{a6.capitalize()}\n[Age]: {a7}```"""
await member.send(app)
embed.description = '''
Are you sure you want to send this application? :white_check_mark: to send and :x: to cancel
'''
app_conf = await member.send(embed=embed)
await app_conf.add_reaction('✅')
await app_conf.add_reaction('❌')
# Waits for reaction confirmation
r = await self.get_reaction(member, check_reaction)
if r is None:
return
if r == '✅':
embed.description = "**Application successfully made, please, be patient now!**"
await member.send(embed=embed)
event_manager_channel = await self.client.fetch_channel(self.event_manager_app_channel_id)
app = await event_manager_channel.send(content=f"{member.mention}\n{app}")
await app.add_reaction('✅')
await app.add_reaction('❌')
# Saves in the database
await self.insert_application(app.id, member.id, 'event_manager')
else:
self.cache[member.id] = 0
return await member.send("**Thank you anyways!**")
async def send_verified_selfies_verification(self, interaction: discord.Interaction) -> None:
""" Sends a message to the user asking for them to send a selfie in order for them
to get the verified selfies role.
:param interaction: The interaction object. """
guild = interaction.guild
member = interaction.user
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
embed = discord.Embed(
title=f"__Verify__",
description=f"""You have opened a verification request, if you would like to verify:\n
**1.** Take a clear picture of yourself holding a piece of paper with today's date and time of verification, and your Discord server name written on it. Image links won't work, only image uploads!\n(You have 5 minutes to do so)"""
)
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.set_image(url="https://cdn.discordapp.com/attachments/562019472257318943/882352621116096542/slothfacepopoo.png")
await member.send(embed=embed)
while True:
msg = await self.get_message(member, msg_check, 300)
if msg is None:
return await member.send(f"**Timeout, you didn't answer in time, try again later!**")
attachments = [att for att in msg.attachments if att.content_type.startswith('image')]
if msg.content.lower() == 'quit':
return await member.send(f"**Bye!**")
if not attachments:
await member.send(f"**No uploaded pic detected, send it again or type `quit` to stop this!**")
continue
break
# Sends verified request to admins
verify_embed = discord.Embed(
title=f"__Verification Request__",
description=f"{member} ({member.id})",
color=member.color,
timestamp=interaction.message.created_at
)
verify_embed.set_thumbnail(url=member.display_avatar)
verify_embed.set_image(url=attachments[0].url)
verify_req_channel = discord.utils.get(guild.text_channels, id=self.verify_reqs_channel_id)
verify_msg = await verify_req_channel.send(content=member.mention, embed=verify_embed)
await verify_msg.add_reaction('✅')
await verify_msg.add_reaction('❌')
# Saves
await self.insert_application(verify_msg.id, member.id, 'verify')
return await member.send(f"**Request sent, you will get notified here if you get accepted or declined! ✅**")
# - Report someone
async def report_someone(self, interaction: discord.Interaction):
member = interaction.user
guild = interaction.guild
if open_channel := await self.member_has_open_channel(member.id):
if open_channel := discord.utils.get(guild.text_channels, id=open_channel[1]):
embed = discord.Embed(title="Error!", description=f"**You already have an open channel! ({open_channel.mention})**", color=discord.Color.red())
await interaction.followup.send(embed=embed, ephemeral=True)
return False
else:
await self.remove_user_open_channel(member.id)
# Report someone
case_cat = discord.utils.get(guild.categories, id=case_cat_id)
counter = await self.get_case_number()
moderator = discord.utils.get(guild.roles, id=moderator_role_id)
cosmos = discord.utils.get(guild.members, id=self.cosmos_id)
overwrites = {guild.default_role: discord.PermissionOverwrite(
read_messages=False, send_messages=False, connect=False, view_channel=False),
member: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True),
moderator: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True, manage_messages=True)}
try:
the_channel = await guild.create_text_channel(name=f"case-{counter[0][0]}", category=case_cat, overwrites=overwrites)
except Exception:
await interaction.followup.send("**Something went wrong with it, please contact an admin!**", ephemeral=True)
raise
else:
created_embed = discord.Embed(
title="Report room created!",
description=f"**Go to {the_channel.mention}!**",
color=discord.Color.green())
await interaction.followup.send(embed=created_embed, ephemeral=True)
await self.insert_user_open_channel(member.id, the_channel.id)
await self.increase_case_number()
embed = discord.Embed(title="Report Support!", description=f"Please, {member.mention}, try to explain what happened and who you want to report.",
color=discord.Color.red())
message = await the_channel.send(content=f"{member.mention}, {moderator.mention}, {cosmos.mention}", embed=embed)
ctx = await self.client.get_context(message)
return await self.client.get_cog('Tools').vc(ctx, member=member)
# - Report someone
async def generic_help(self, interaction: discord.Interaction, type_help: str, message: str, ping: bool = True) -> None:
""" Opens a generic help channel.
:param interaction: The interaction that generated this action.
:param type_help: The kind of general help.
:param message: The text message to send in the room.
:param ping: Whether mods should be pinged for this. """
member = interaction.user
guild = interaction.guild
if open_channel := await self.member_has_open_channel(member.id):
if open_channel := discord.utils.get(guild.text_channels, id=open_channel[1]):
embed = discord.Embed(title="Error!", description=f"**You already have an open channel! ({open_channel.mention})**", color=discord.Color.red())
await interaction.followup.send(embed=embed, ephemeral=True)
return False
else:
await self.remove_user_open_channel(member.id)
# General help
case_cat = discord.utils.get(guild.categories, id=case_cat_id)
moderator = discord.utils.get(guild.roles, id=moderator_role_id)
overwrites = {guild.default_role: discord.PermissionOverwrite(
read_messages=False, send_messages=False, connect=False, view_channel=False),
member: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True),
moderator: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True, manage_messages=True)}
try:
the_channel = await guild.create_text_channel(name=f"{"-".join(type_help.split())}", category=case_cat, overwrites=overwrites)
except:
await interaction.followup.send("**Something went wrong with it, please contact an admin!**", ephemeral=True)
raise Exception
else:
created_embed = discord.Embed(
title=f"Room for `{type_help}` created!",
description=f"**Go to {the_channel.mention}!**",
color=discord.Color.green())
await interaction.followup.send(embed=created_embed, ephemeral=True)
await self.insert_user_open_channel(member.id, the_channel.id)
embed = discord.Embed(title=f"{type_help.title()}!", description=message, color=discord.Color.red())
if ping:
await the_channel.send(content=f"{member.mention}, {moderator.mention}", embed=embed)
else:
await the_channel.send(content=member.mention, embed=embed)
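# Editor's note: a minimal, hedged sketch of the private-channel overwrite
# pattern shared by report_someone() and generic_help() above: hide the channel
# from @everyone, then grant it to the reporter and the moderator role only.
# `guild`, `member`, `moderator`, and `category` are assumed discord.py objects.
async def _demo_private_case_channel(guild, member, moderator, category):
    overwrites = {
        guild.default_role: discord.PermissionOverwrite(view_channel=False),
        member: discord.PermissionOverwrite(view_channel=True, send_messages=True),
        moderator: discord.PermissionOverwrite(view_channel=True, send_messages=True, manage_messages=True),
    }
    return await guild.create_text_channel("demo-case", category=category, overwrites=overwrites)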
async def get_message_content(self, member, check, timeout: Optional[int] = 300) -> str:
""" Gets a message content.
:param member: The member to get the message from.
:param check: The check for the event.
:param timeout: Timeout for getting the message. [Optional] """
try:
message = await self.client.wait_for('message', timeout=timeout,
check=check)
except asyncio.TimeoutError:
await member.send("**Timeout! Try again.**")
return None
else:
content = message.content
return content
async def get_message(self, member, check, timeout: Optional[int] = 300) -> discord.Message:
""" Gets a message.
:param member: The member to get the message from.
:param check: The check for the event.
:param timeout: Timeout for getting the message. [Optional] """
try:
message = await self.client.wait_for('message', timeout=timeout,
check=check)
except asyncio.TimeoutError:
await member.send("**Timeout! Try again.**")
return None
else:
return message
async def get_reaction(self, member, check, timeout: int = 300):
""" Gets a reaction and returns its emoji as a string.
:param member: The member to get the reaction from.
:param check: The check for the event.
:param timeout: Timeout for getting the reaction. [Optional] """
try:
reaction, _ = await self.client.wait_for('reaction_add',
timeout=timeout, check=check)
except asyncio.TimeoutError:
await member.send("**Timeout! Try again.**")
return None
else:
return str(reaction.emoji)
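# Editor's note: a hedged sketch of the wait_for pattern the three getters above
# share: block on a gateway event with a predicate and a timeout, returning None
# on expiry. `client` and `member` are assumed to exist; the check mirrors the
# DM-only checks used throughout this cog.
async def _demo_wait_for_dm(client, member, timeout=300):
    def check(message):
        return message.author == member and message.guild is None  # DMs only
    try:
        message = await client.wait_for('message', timeout=timeout, check=check)
    except asyncio.TimeoutError:
        return None
    return message.content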
@commands.command(aliases=['permit_case', 'allow_case', 'add_witness', 'witness', 'aw'])
@commands.has_any_role(*allowed_roles)
async def allow_witness(self, ctx, member: discord.Member = None):
""" Allows a witness to join a case channel.
:param member: The member to allow. """
if not member:
return await ctx.send("**Inform a witness to allow!**")
user_channel = await self.get_case_channel(ctx.channel.id)
if user_channel:
confirm = await Confirm(f"**Are you sure you want to allow {member.mention} as a witness in this case channel, {ctx.author.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not allowing them, then!**")
channel = discord.utils.get(ctx.guild.channels, id=user_channel[0][1])
try:
await channel.set_permissions(
member, read_messages=True, send_messages=True, connect=True, speak=True, view_channel=True)
except Exception:
pass
return await ctx.send(f"**{member.mention} has been allowed here!**")
else:
await ctx.send(f"**This is not a case channel, {ctx.author.mention}!**")
@commands.command(aliases=['forbid_case', 'delete_witness', 'remove_witness', 'fw'])
@commands.has_any_role(*allowed_roles)
async def forbid_witness(self, ctx, member: discord.Member = None):
""" Forbids a witness from a case channel.
:param member: The member to forbid. """
if not member:
return await ctx.send("**Inform a witness to forbid!**")
user_channel = await self.get_case_channel(ctx.channel.id)
if user_channel:
confirm = await Confirm(f"**Are you sure you want to forbid {member.mention} from being a witness in this case channel, {ctx.author.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not forbidding them, then!**")
channel = discord.utils.get(ctx.guild.channels, id=user_channel[0][1])
try:
await channel.set_permissions(
member, read_messages=False, send_messages=False, connect=False, speak=False, view_channel=False)
except Exception:
pass
return await ctx.send(f"**{member.mention} has been forbidden here!**")
else:
await ctx.send(f"**This is not a case channel, {ctx.author.mention}!**")
@commands.command(aliases=['delete_channel', 'archive'])
@commands.has_any_role(*allowed_roles)
async def close_channel(self, ctx):
""" (MOD) Closes a Case-Channel. """
user_channel = await self.get_case_channel(ctx.channel.id)
if not user_channel:
return await ctx.send(f"**What do you think that you are doing? You cannot delete this channel, {ctx.author.mention}!**")
channel = discord.utils.get(ctx.guild.text_channels, id=user_channel[0][1])
embed = discord.Embed(title="Confirmation",
description="Are you sure that you want to delete this channel?",
color=ctx.author.color,
timestamp=ctx.message.created_at)
confirmation = await ctx.send(content=ctx.author.mention, embed=embed)
await confirmation.add_reaction('✅')
await confirmation.add_reaction('❌')
try:
reaction, user = await self.client.wait_for('reaction_add', timeout=20,
check=lambda r, u: u == ctx.author and r.message.channel == ctx.channel and str(r.emoji) in ['✅', '❌'])
except asyncio.TimeoutError:
embed = discord.Embed(title="Confirmation",
description="You took too long to answer the question; not deleting it!",
color=discord.Color.red(),
timestamp=ctx.message.created_at)
return await confirmation.edit(content=ctx.author.mention, embed=embed)
else:
if str(reaction.emoji) == '✅':
embed.description = f"**Channel {ctx.channel.mention} is being deleted...**"
await confirmation.edit(content=ctx.author.mention, embed=embed)
await asyncio.sleep(3)
await channel.delete()
await self.remove_user_open_channel(user_channel[0][0])
else:
embed.description = "Not deleting it!"
await confirmation.edit(content='', embed=embed)
async def dnk_embed(self, member):
def check(r, u):
return u == member and r.message.id == the_msg.id and str(r.emoji) in ['⬅️', '➡️']
command_index = 0
initial_embed = discord.Embed(title="__Table of Commands and their Prices__",
description="These are a few of commands and features that DNK can do.",
color=discord.Color.blue())
the_msg = await member.send(embed=initial_embed)
await the_msg.add_reaction('⬅️')
await the_msg.add_reaction('➡️')
while True:
embed = discord.Embed(title=f"__Table of Commands and their Prices__ ({command_index+1}/{len(list_of_commands)})",
description="These are a few of commands and features that DNK can do.",
color=discord.Color.blue())
embed.add_field(name=list_of_commands[command_index][0],
value=list_of_commands[command_index][1])
await the_msg.edit(embed=embed)
try:
pending_tasks = [self.client.wait_for('reaction_add', check=check),
self.client.wait_for('reaction_remove', check=check)]
done_tasks, pending_tasks = await asyncio.wait(pending_tasks, timeout=60, return_when=asyncio.FIRST_COMPLETED)
if not done_tasks:
raise asyncio.TimeoutError
for task in pending_tasks:
task.cancel()
except asyncio.TimeoutError:
await the_msg.remove_reaction('⬅️', self.client.user)
await the_msg.remove_reaction('➡️', self.client.user)
break
else:
for task in done_tasks:
reaction, user = await task
if str(reaction.emoji) == "➡️":
# await the_msg.remove_reaction(reaction.emoji, member)
if command_index < (len(list_of_commands) - 1):
command_index += 1
continue
elif str(reaction.emoji) == "⬅️":
# await the_msg.remove_reaction(reaction.emoji, member)
if command_index > 0:
command_index -= 1
continue
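# Editor's note: a hedged sketch of the pagination idiom used by dnk_embed()
# above: race reaction_add against reaction_remove so the page flips whether the
# user adds or removes an arrow, then cancel whichever waiter lost. `client`
# and `check` are assumed to come from the caller.
async def _demo_first_reaction_event(client, check, timeout=60):
    pending = [client.wait_for('reaction_add', check=check),
               client.wait_for('reaction_remove', check=check)]
    done, pending = await asyncio.wait(pending, timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()        # drop the waiter that didn't fire
    if not done:
        return None          # timed out
    reaction, _ = await done.pop()
    return str(reaction.emoji)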
# Discord methods
async def create_interview_room(self, guild: discord.Guild, app: List[str]) -> None:
""" Creates an interview room for the given application.
:param guild: The server in which the interview will be.
:param app: The applicant info. """
applicant = discord.utils.get(guild.members, id=app[1])
interview_info = self.interview_info.get(app[2])
# Create Private Thread for the user
app_parent = self.client.get_channel(interview_info['parent'])
# Delete this later:
message = None
# message = await app_parent.send('Uncomment this in your development environment')
txt_channel = await app_parent.create_thread(name=f"{applicant.display_name}'s-interview", message=message, reason=f"{app[2].title()} Interview Room")
# Add permissions for the user in the interview room
parent_channel = self.client.get_channel(interview_info['parent'])
interview_vc = self.client.get_channel(interview_info['interview'])
# Updates the applicant's application in the database, adding the channels ids
await self.update_application(applicant.id, txt_channel.id, interview_vc.id, app[2])
# Set channel perms for the user.
await parent_channel.set_permissions(applicant, read_messages=True, send_messages=False, view_channel=True)
await interview_vc.set_permissions(applicant, speak=True, connect=True, view_channel=True)
app_embed = discord.Embed(
title=f"{applicant.name}'s Interview",
description=f"""
Hello, {applicant.mention}, we have received and reviewed your `{app[2].title().replace('_', ' ')}` application. In order to explain how our system works we have to schedule a voice conversation with you.
When would be the best time to talk to one of our staff?""",
color=applicant.color)
formatted_pings = await self.format_application_pings(guild, interview_info['pings'])
await txt_channel.send(content=f"{formatted_pings}, {applicant.mention}", embed=app_embed)
# In-game commands
@commands.command()
@commands.has_permissions(administrator=True)
async def close_app(self, ctx) -> None:
""" (ADMIN) Closes an application channel. """
member = ctx.author
channel = ctx.channel
guild = ctx.guild
if not (app := await self.get_application_by_channel(channel.id)):
return await ctx.send(f"**This is not an application channel, {member.mention}!**")
interview_info = self.interview_info[app[2]]
all_apps_channel = discord.utils.get(guild.text_channels, id=interview_info['app'])
confirm = await Confirm(f"**Are you sure that you want to delete this application channel, {member.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not deleting it, then, {member.mention}!**")
applicant = guild.get_member(app[1])
parent_channel = discord.utils.get(guild.text_channels, id=interview_info['parent'])
interview_vc = discord.utils.get(guild.voice_channels, id=interview_info['interview'])
try:
await parent_channel.set_permissions(applicant, overwrite=None)
await interview_vc.set_permissions(applicant, overwrite=None)
except Exception:
pass
await channel.delete()
await self.delete_application(app[0])
try:
msg = await all_apps_channel.fetch_message(app[0])
await msg.add_reaction('🔒')
except Exception:
pass
async def audio(self, member: discord.Member, audio_name: str) -> None:
""" Plays an audio.
:param member: A member to get guild context from.
:param audio_name: The name of the audio to play. """
# Resolves bot's channel state
staff_vc = self.client.get_channel(staff_vc_id)
bot_state = member.guild.voice_client
try:
if bot_state and bot_state.channel and bot_state.channel != staff_vc:
await bot_state.disconnect()
await bot_state.move_to(staff_vc)
elif not bot_state:
voicechannel = discord.utils.get(member.guild.channels, id=staff_vc.id)
vc = await voicechannel.connect()
await asyncio.sleep(2)
voice_client: discord.VoiceClient = discord.utils.get(self.client.voice_clients, guild=member.guild)
# Plays / and they don't stop commin' /
if voice_client and not voice_client.is_playing():
audio_source = discord.FFmpegPCMAudio(f'tts/{audio_name}.mp3')
voice_client.play(audio_source, after=lambda e: print("Finished Warning Staff!"))
else:
print("couldn't play it!")
except Exception as e:
print(e)
return
@commands.command(aliases=['make_report_msg', 'reportmsg', 'report_msg', 'supportmsg', 'support_msg'])
@commands.has_permissions(administrator=True)
async def make_report_support_message(self, ctx) -> None:
""" (ADM) Makes a Report-Support message. """
guild = ctx.guild
embed = discord.Embed(
title="__Report-Support Section__",
description="""Welcome to the Report-Support section, here you can easily find your way into things and/or get help with whatever problem you may be experiencing.""",
color=ctx.author.color,
timestamp=ctx.message.created_at,
url="https://thelanguagesloth.com"
)
embed.set_author(name=self.client.user.display_name, url=self.client.user.display_avatar, icon_url=self.client.user.display_avatar)
embed.set_thumbnail(url=guild.icon.url)
embed.set_footer(text=guild.name, icon_url=guild.icon.url)
view = ReportSupportView(self.client)
await ctx.send("\u200b", embed=embed, view=view)
self.client.add_view(view=view)
def setup(client):
client.add_cog(ReportSupport(client))
| import discord
from discord import member
from discord.ext import commands
from mysqldb import *
import asyncio
from extra.useful_variables import list_of_commands
from extra.prompt.menu import Confirm
from extra.view import ReportSupportView
from typing import List, Dict, Optional
import os
case_cat_id = int(os.getenv('CASE_CAT_ID'))
reportsupport_channel_id = int(os.getenv('REPORT_CHANNEL_ID'))
dnk_id = int(os.getenv('DNK_ID'))
moderator_role_id = int(os.getenv('MOD_ROLE_ID'))
admin_role_id = int(os.getenv('ADMIN_ROLE_ID'))
lesson_management_role_id = int(os.getenv('LESSON_MANAGEMENT_ROLE_ID'))
staff_vc_id = int(os.getenv('STAFF_VC_ID'))
allowed_roles = [
int(os.getenv('OWNER_ROLE_ID')), admin_role_id,
moderator_role_id]
from extra.reportsupport.applications import ApplicationsTable
from extra.reportsupport.verify import Verify
from extra.reportsupport.openchannels import OpenChannels
report_support_classes: List[commands.Cog] = [
ApplicationsTable, Verify, OpenChannels
]
class ReportSupport(*report_support_classes):
""" A cog related to the system of reports and some other things. """
def __init__(self, client) -> None:
# super(ReportSupport, self).__init__(client)
self.client = client
self.cosmos_id: int = int(os.getenv('COSMOS_ID'))
self.muffin_id: int = int(os.getenv('MUFFIN_ID'))
self.cache = {}
self.report_cache = {}
@commands.Cog.listener()
async def on_ready(self) -> None:
self.client.add_view(view=ReportSupportView(self.client))
print('ReportSupport cog is online!')
async def handle_application(self, guild, payload) -> None:
""" Handles teacher applications.
:param guild: The server in which the application is running.
:param payload: Data about the Staff member who is opening the application. """
emoji = str(payload.emoji)
if emoji == '✅':
# Gets the teacher app and does the magic
if not (app := await self.get_application_by_message(payload.message_id)):
return
# Checks that the person doesn't already have an open interview channel
if not app[3]:
# Creates an interview room with the teacher and sends their application there (you can z!close there)
return await self.create_interview_room(guild, app)
elif emoji == '❌':
# Tries to delete the teacher app from the db, in case it is registered
app = await self.get_application_by_message(payload.message_id)
if app and not app[3]:
await self.delete_application(payload.message_id)
interview_info = self.interview_info[app[2]]
app_channel = self.client.get_channel(interview_info['app'])
app_msg = await app_channel.fetch_message(payload.message_id)
await app_msg.add_reaction('🔏')
if applicant := discord.utils.get(guild.members, id=app[1]):
return await applicant.send(embed=discord.Embed(description=interview_info['message']))
async def send_teacher_application(self, member) -> None:
""" Sends a teacher application form to the user.
:param member: The member to send the application to. """
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
def check_reaction(r, u):
return u.id == member.id and not r.message.guild and str(r.emoji) in ['✅', '❌']
terms_embed = discord.Embed(
title="Terms of Application",
description="""Hello there!
Thank you for applying for teaching here,
Before you can formally start applying to teach in The Language Sloth, there are a couple things we would like you to know. The Language Sloth is a free of charge language learning platform which is meant to be accessible and open for anyone who is interested in languages from any background. We do not charge for any kind of service, nor do we pay for any services for starting teachers. We are a community that shares the same interest: Languages.
We do not require professional teaching skills; anyone can teach their native language. However, we have a set number of requirements for our teachers
Entry requirements:
》Must be at least 16 years of age
》Must have at least a conversational level of English
》Must have clear microphone audio
》Must commit 40 minutes per week
》Must prepare their own material weekly
``` ✅ To agree with our terms```""",
color=member.color
)
terms = await member.send(embed=terms_embed)
await terms.add_reaction('✅')
await terms.add_reaction('❌')
# Waits for reaction confirmation to the terms of application
terms_r = await self.get_reaction(member, check_reaction)
if terms_r is None:
self.cache[member.id] = 0
return
if terms_r != '✅':
self.cache[member.id] = 0
return await member.send(f"**Thank you anyways, bye!**")
embed = discord.Embed(title=f"__Teacher Application__")
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.description = '''
- Hello there, you've reacted to apply to become a teacher.
To apply, please answer the following questions with one message at a time.
Question one:
What language are you applying to teach?'''
q1 = await member.send(embed=embed)
a1 = await self.get_message_content(member, msg_check)
if not a1:
return
embed.description = '''
- Why do you want to teach that language on the language sloth?
Please answer with one message.'''
q2 = await member.send(embed=embed)
a2 = await self.get_message_content(member, msg_check)
if not a2:
return
embed.description = '''
- On The Language Sloth, our classes happen once a week at the same time weekly.
Please let us know when would be the best time for you to teach,
E.g.: Thursdays 3 pm CET; you can specify your timezone.
Again remember to answer with one message.'''
q3 = await member.send(embed=embed)
a3 = await self.get_message_content(member, msg_check)
if not a3:
return
embed.description = '''
- Let's talk about your English level, how do you consider your English level?
Are you able to teach lessons in English?
Please answer using one message only'''
q4 = await member.send(embed=embed)
a4 = await self.get_message_content(member, msg_check)
if not a4:
return
embed.description = '''- Have you ever taught people before?'''
q5 = await member.send(embed=embed)
a5 = await self.get_message_content(member, msg_check)
if not a5:
return
embed.description = '''- Inform a short description for your class.'''
q6 = await member.send(embed=embed)
a6 = await self.get_message_content(member, msg_check)
if not a6:
return
embed.description = '''- How old are you?'''
q7 = await member.send(embed=embed)
a7 = await self.get_message_content(member, msg_check)
if not a7:
return
# Get user's native roles
user_native_roles = []
for role in member.roles:
if str(role.name).lower().startswith('native'):
user_native_roles.append(role.name.title())
# Application result
app = f"""```ini\n[Username]: {member} ({member.id})\n[Joined the server]: {member.joined_at.strftime("%a, %d %B %y, %I %M %p UTC")}\n[Applying to teach]: {a1.title()}\n[Native roles]: {', '.join(user_native_roles)}\n[Motivation for teaching]: {a2.capitalize()}\n[Applying to teach on]: {a3.upper()}\n[English level]: {a4.capitalize()}\n[Experience teaching]: {a5.capitalize()}\n[Description]:{a6.capitalize()}\n[Age]: {a7}```"""
await member.send(app)
embed.description = '''
Are you sure you want to send this application? :white_check_mark: to send and :x: to cancel
'''
app_conf = await member.send(embed=embed)
await app_conf.add_reaction('✅')
await app_conf.add_reaction('❌')
# Waits for reaction confirmation
r = await self.get_reaction(member, check_reaction)
if r is None:
return
if r == '✅':
embed.description = "**Application successfully made, please, be patient now!**"
await member.send(embed=embed)
teacher_app_channel = await self.client.fetch_channel(self.teacher_app_channel_id)
muffin = discord.utils.get(teacher_app_channel.guild.members, id=self.muffin_id)
app = await teacher_app_channel.send(content=f"{muffin.mention}, {member.mention}\n{app}")
await app.add_reaction('✅')
await app.add_reaction('❌')
# Saves in the database
await self.insert_application(app.id, member.id, 'teacher')
else:
self.cache[member.id] = 0
return await member.send("**Thank you anyways!**")
async def send_moderator_application(self, member):
""" Sends a moderator application form to the user.
:param member: The member to send the application to. """
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
def check_reaction(r, u):
return u.id == member.id and not r.message.guild and str(r.emoji) in ['✅', '❌']
terms_embed = discord.Embed(
title="Terms of Application",
description="""Hello there!
Before you can formally start applying for Staff in The Language Sloth, there are a couple of requirements we feel are necessary:
Entry requirements:
```》Must be at least 18 years of age
》Must have at least a conversational level of English
》Must be an active member
》Be a member of the server for at least a month
》Must not have any warnings in the past 3 months.```""",
color=member.color
)
terms = await member.send(embed=terms_embed)
await terms.add_reaction('✅')
await terms.add_reaction('❌')
# Waits for reaction confirmation to the terms of application
terms_r = await self.get_reaction(member, check_reaction)
if terms_r is None:
self.cache[member.id] = 0
return
if terms_r != '✅':
self.cache[member.id] = 0
return await member.send(f"**Thanks anyways, bye!**")
embed = discord.Embed(title=f"__Moderator Application__")
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.description = "- What's your age?"
await member.send(embed=embed)
a1 = await self.get_message_content(member, msg_check)
if not a1: return
embed.description = """
- Hello, there you've reacted to apply to become a moderator.
To apply please answer to these following questions with One message at a time
Question one:
Do you have any experience moderating Discord servers?"""
q2 = await member.send(embed=embed)
a2 = await self.get_message_content(member, msg_check)
if not a2: return
embed.description = """
- What is your gender?
Please answer with one message."""
await member.send(embed=embed)
a3 = await self.get_message_content(member, msg_check)
if not a3: return
embed.description = """
- What's your English level? Are you able to express yourself using English?
Please answer using one message only."""
await member.send(embed=embed)
a4 = await self.get_message_content(member, msg_check)
if not a4: return
embed.description = """
- Why are you applying to be Staff? What is your motivation?
Please answer using one message only."""
await member.send(embed=embed)
a5 = await self.get_message_content(member, msg_check)
if not a5: return
embed.description = """- How do you think The Language Sloth could be a better community?
Please answer using one message only."""
await member.send(embed=embed)
a6 = await self.get_message_content(member, msg_check)
if not a6: return
embed.description = """- How active are you on Discord in general?
Please answer using one message only."""
await member.send(embed=embed)
a7 = await self.get_message_content(member, msg_check)
if not a7: return
embed.description = """- What is your time zone?
Please answer using one message only."""
await member.send(embed=embed)
a8 = await self.get_message_content(member, msg_check)
if not a8: return
embed.description = "- What is your country of origin?"
await member.send(embed=embed)
a9 = await self.get_message_content(member, msg_check)
if not a9: return
embed.description = "- Tell us about yourself?"
await member.send(embed=embed)
a10 = await self.get_message_content(member, msg_check)
if not a10: return
# Get user's native roles
user_native_roles = []
for role in member.roles:
if str(role.name).lower().startswith('native'):
user_native_roles.append(role.name.title())
# Application result
app = f"""```ini\n[Username]: {member} ({member.id})
[Joined the server]: {member.joined_at.strftime("%a, %d %B %y, %I %M %p UTC")}
[Native roles]: {', '.join(user_native_roles)}
[Age]: {a1}
[Experience moderating]: {a2.capitalize()}
[Gender]: {a3.title()}
[English level]: {a4.capitalize()}
[Reason & Motivation]: {a5.capitalize()}
[How we can improve Sloth]: {a6.capitalize()}
[Activity Status]: {a7.capitalize()}
[Timezone]: {a8.title()}
[Origin Country]: {a9.title()}
[About]: {a10.capitalize()}```"""
await member.send(app)
embed.description = """
Are you sure you want to send this application? React with :white_check_mark: to send it or :x: to cancel.
"""
app_conf = await member.send(embed=embed)
await app_conf.add_reaction('✅')
await app_conf.add_reaction('❌')
# Waits for reaction confirmation
r = await self.get_reaction(member, check_reaction)
if r is None:
return
if r == '✅':
# ""
embed.description = """**Application successfully made, please, be patient now.**
We will let you know when we need a new mod. We check apps when we need it!"""
await member.send(embed=embed)
moderator_app_channel = await self.client.fetch_channel(self.moderator_app_channel_id)
cosmos = discord.utils.get(moderator_app_channel.guild.members, id=self.cosmos_id)
app = await moderator_app_channel.send(content=f"{cosmos.mention}, {member.mention}\n{app}")
await app.add_reaction('✅')
await app.add_reaction('❌')
# Saves in the database
await self.insert_application(app.id, member.id, 'moderator')
else:
self.cache[member.id] = 0
return await member.send("**Thank you anyways!**")
async def send_event_manager_application(self, member):
""" Sends a event manager application form to the user.
:param member: The member to send the application to. """
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
def check_reaction(r, u):
return u.id == member.id and not r.message.guild and str(r.emoji) in ['✅', '❌']
terms_embed = discord.Embed(
title="Terms of Application",
description="""Hello there!
Thank you for your interest in hosting events here.
Before you can formally start applying to host events in The Language Sloth, there are a couple of things we would like you to know. The Language Sloth is a free-of-charge language learning platform which is meant to be accessible and open to anyone who is interested in languages, from any background. We do not charge for any kind of service, nor do we pay for any services for hosting events. We are a community that shares the same interest: languages.
We do not require professional skills; however, we have a set number of requirements for our event managers.
Entry requirements:
》Must be at least 16 years of age
》Must have at least a conversational level of English
》Must have clear microphone audio
》Must prepare their own material weekly
``` ✅ To agree with our terms```""",
color=member.color
)
terms = await member.send(embed=terms_embed)
await terms.add_reaction('✅')
await terms.add_reaction('❌')
# Waits for reaction confirmation to the terms of application
terms_r = await self.get_reaction(member, check_reaction)
if terms_r is None:
self.cache[member.id] = 0
return
if terms_r != '✅':
self.cache[member.id] = 0
return await member.send(f"**Thank you anyways, bye!**")
embed = discord.Embed(title=f"__Teacher Application__")
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.title = "Event manager Application"
embed.description = '''
- Hello there, you've reacted to apply to become an event manager.
To apply, please answer the following questions, one message at a time.
Question one:
What is your event called?'''
q1 = await member.send(embed=embed)
a1 = await self.get_message_content(member, msg_check)
if not a1:
return
embed.description = '''
- Why do you want to host that event on The Language Sloth?
Please answer with one message.'''
q2 = await member.send(embed=embed)
a2 = await self.get_message_content(member, msg_check)
if not a2:
return
embed.description = '''
- Please let us know when would be the best time for you to host events.
E.g.: Thursdays 3 pm CET; you can specify your timezone.
Again, remember to answer with one message.'''
q3 = await member.send(embed=embed)
a3 = await self.get_message_content(member, msg_check)
if not a3:
return
embed.description = """
- Let's talk about your English level. How would you rate it?
Are you able to host events in English? If not, in which language would you be hosting?
Please answer using one message only."""
q4 = await member.send(embed=embed)
a4 = await self.get_message_content(member, msg_check)
if not a4:
return
embed.description = "- Have you ever hosted events before? If yes, please describe!"
q5 = await member.send(embed=embed)
a5 = await self.get_message_content(member, msg_check)
if not a5:
return
embed.description = "- Inform a short description for your event/events."
q6 = await member.send(embed=embed)
a6 = await self.get_message_content(member, msg_check)
if not a6:
return
embed.description = "- How old are you?"
q7 = await member.send(embed=embed)
a7 = await self.get_message_content(member, msg_check)
if not a7:
return
# Get user's native roles
user_native_roles = []
for role in member.roles:
if str(role.name).lower().startswith('native'):
user_native_roles.append(role.name.title())
# Application result
app = f"""```ini\n[Username]: {member} ({member.id})\n[Joined the server]: {member.joined_at.strftime("%a, %d %B %y, %I %M %p UTC")}\n[Applying to host]: {a1.title()}\n[Native roles]: {', '.join(user_native_roles)}\n[Motivation for hosting]: {a2.capitalize()}\n[Applying to host on]: {a3.upper()}\n[English level]: {a4.capitalize()}\n[Experience hosting]: {a5.capitalize()}\n[Description]:{a6.capitalize()}\n[Age]: {a7}```"""
await member.send(app)
embed.description = '''
Are you sure you want to send this application? React with :white_check_mark: to send it or :x: to cancel.
'''
app_conf = await member.send(embed=embed)
await app_conf.add_reaction('✅')
await app_conf.add_reaction('❌')
# Waits for reaction confirmation
r = await self.get_reaction(member, check_reaction)
if r is None:
return
if r == '✅':
embed.description = "**Application successfully made, please, be patient now!**"
await member.send(embed=embed)
event_manager_channel = await self.client.fetch_channel(self.event_manager_app_channel_id)
app = await event_manager_channel.send(content=f"{member.mention}\n{app}")
await app.add_reaction('✅')
await app.add_reaction('❌')
# Saves in the database
await self.insert_application(app.id, member.id, 'event_manager')
else:
self.cache[member.id] = 0
return await member.send("**Thank you anyways!**")
async def send_verified_selfies_verification(self, interaction: discord.Interaction) -> None:
""" Sends a message to the user asking for them to send a selfie in order for them
to get the verified selfies role.
:param interaction: The interaction object. """
guild = interaction.guild
member = interaction.user
def msg_check(message):
if message.author == member and not message.guild:
if len(message.content) <= 100:
return True
else:
self.client.loop.create_task(member.send("**Your answer must be within 100 characters**"))
else:
return False
embed = discord.Embed(
title=f"__Verify__",
description=f"""You have opened a verification request, if you would like to verify:\n
**1.** Take a clear picture of yourself holding a piece of paper with today's date and time of verification, and your Discord server name written on it. Image links won't work, only image uploads!\n(You have 5 minutes to do so)"""
)
embed.set_footer(text=f"by {member}", icon_url=member.display_avatar)
embed.set_image(url="https://cdn.discordapp.com/attachments/562019472257318943/882352621116096542/slothfacepopoo.png")
await member.send(embed=embed)
while True:
msg = await self.get_message(member, msg_check, 300)
if msg is None:
return await member.send(f"**Timeout, you didn't answer in time, try again later!**")
attachments = [att for att in msg.attachments if att.content_type and att.content_type.startswith('image')]  # content_type can be None
if msg.content.lower() == 'quit':
return await member.send(f"**Bye!**")
if not attachments:
await member.send(f"**No uploaded pic detected, send it again or type `quit` to stop this!**")
continue
break
# Sends verified request to admins
verify_embed = discord.Embed(
title=f"__Verification Request__",
description=f"{member} ({member.id})",
color=member.color,
timestamp=interaction.message.created_at
)
verify_embed.set_thumbnail(url=member.display_avatar)
verify_embed.set_image(url=attachments[0].url)
verify_req_channel = discord.utils.get(guild.text_channels, id=self.verify_reqs_channel_id)
verify_msg = await verify_req_channel.send(content=member.mention, embed=verify_embed)
await verify_msg.add_reaction('✅')
await verify_msg.add_reaction('❌')
# Saves
await self.insert_application(verify_msg.id, member.id, 'verify')
return await member.send(f"**Request sent, you will get notified here if you get accepted or declined! ✅**")
# - Report someone
async def report_someone(self, interaction: discord.Interaction):
member = interaction.user
guild = interaction.guild
if open_channel := await self.member_has_open_channel(member.id):
if open_channel := discord.utils.get(guild.text_channels, id=open_channel[1]):
embed = discord.Embed(title="Error!", description=f"**You already have an open channel! ({open_channel.mention})**", color=discord.Color.red())
await interaction.followup.send(embed=embed, ephemeral=True)
return False
else:
await self.remove_user_open_channel(member.id)
# Report someone
case_cat = discord.utils.get(guild.categories, id=case_cat_id)
counter = await self.get_case_number()
moderator = discord.utils.get(guild.roles, id=moderator_role_id)
cosmos = discord.utils.get(guild.members, id=self.cosmos_id)
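# Channel overwrites: hidden from @everyone; the reporter can read/write, and moderators can additionally manage messages.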
overwrites = {guild.default_role: discord.PermissionOverwrite(
read_messages=False, send_messages=False, connect=False, view_channel=False),
member: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True),
moderator: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True, manage_messages=True)}
try:
the_channel = await guild.create_text_channel(name=f"case-{counter[0][0]}", category=case_cat, overwrites=overwrites)
except Exception:
await interaction.followup.send("**Something went wrong with it, please contact an admin!**", ephemeral=True)
raise  # re-raise with the original traceback
else:
created_embed = discord.Embed(
title="Report room created!",
description=f"**Go to {the_channel.mention}!**",
color=discord.Color.green())
await interaction.followup.send(embed=created_embed, ephemeral=True)
await self.insert_user_open_channel(member.id, the_channel.id)
await self.increase_case_number()
embed = discord.Embed(title="Report Support!", description=f"Please, {member.mention}, try to explain what happened and who you want to report.",
color=discord.Color.red())
message = await the_channel.send(content=f"{member.mention}, {moderator.mention}, {cosmos.mention}", embed=embed)
ctx = await self.client.get_context(message)
return await self.client.get_cog('Tools').vc(ctx, member=member)
# - Report someone
async def generic_help(self, interaction: discord.Interaction, type_help: str, message: str, ping: bool = True) -> None:
""" Opens a generic help channel.
:param interaction: The interaction that generated this action.
:param type_help: The kind of general help.
:param message: The text message to send in the room.
:param ping: Whether mods should be pinged for this. """
member = interaction.user
guild = interaction.guild
if open_channel := await self.member_has_open_channel(member.id):
if open_channel := discord.utils.get(guild.text_channels, id=open_channel[1]):
embed = discord.Embed(title="Error!", description=f"**You already have an open channel! ({open_channel.mention})**", color=discord.Color.red())
await interaction.followup.send(embed=embed, ephemeral=True)
return False
else:
await self.remove_user_open_channel(member.id)
# General help
case_cat = discord.utils.get(guild.categories, id=case_cat_id)
moderator = discord.utils.get(guild.roles, id=moderator_role_id)
overwrites = {guild.default_role: discord.PermissionOverwrite(
read_messages=False, send_messages=False, connect=False, view_channel=False),
member: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True),
moderator: discord.PermissionOverwrite(
read_messages=True, send_messages=True, connect=False, view_channel=True, manage_messages=True)}
try:
the_channel = await guild.create_text_channel(name=f"{'-'.join(type_help.split())}", category=case_cat, overwrites=overwrites)
except:
await interaction.followup.send("**Something went wrong with it, please contact an admin!**", ephemeral=True)
raise Exception
else:
created_embed = discord.Embed(
title=f"Room for `{type_help}` created!",
description=f"**Go to {the_channel.mention}!**",
color=discord.Color.green())
await interaction.followup.send(embed=created_embed, ephemeral=True)
await self.insert_user_open_channel(member.id, the_channel.id)
embed = discord.Embed(title=f"{type_help.title()}!", description=message, color=discord.Color.red())
if ping:
await the_channel.send(content=f"{member.mention}, {moderator.mention}", embed=embed)
else:
await the_channel.send(content=member.mention, embed=embed)
async def get_message_content(self, member, check, timeout: Optional[int] = 300) -> str:
""" Gets a message content.
:param member: The member to get the message from.
:param check: The check for the event.
:param timeout: Timeout for getting the message. [Optional] """
try:
message = await self.client.wait_for('message', timeout=timeout,
check=check)
except asyncio.TimeoutError:
await member.send("**Timeout! Try again.**")
return None
else:
content = message.content
return content
async def get_message(self, member, check, timeout: Optional[int] = 300) -> discord.Message:
""" Gets a message.
:param member: The member to get the message from.
:param check: The check for the event.
:param timeout: Timeout for getting the message. [Optional] """
try:
message = await self.client.wait_for('message', timeout=timeout,
check=check)
except asyncio.TimeoutError:
await member.send("**Timeout! Try again.**")
return None
else:
return message
async def get_reaction(self, member, check, timeout: int = 300):
try:
reaction, _ = await self.client.wait_for('reaction_add',
timeout=timeout, check=check)
except asyncio.TimeoutError:
await member.send("**Timeout! Try again.**")
return None
else:
return str(reaction.emoji)
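# Illustrative note (not additional cog logic): every question in the DM
# application flows follows the same pattern,
#   answer = await self.get_message_content(member, msg_check)
#   if not answer:
#       return  # the helper has already DM'd a timeout notice
# so the three wait_for wrappers above give all flows one shared timeout path.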
@commands.command(aliases=['permit_case', 'allow_case', 'add_witness', 'witness', 'aw'])
@commands.has_any_role(*allowed_roles)
async def allow_witness(self, ctx, member: discord.Member = None):
""" Allows a witness to join a case channel.
:param member: The member to allow. """
if not member:
return await ctx.send("**Inform a witness to allow!**")
user_channel = await self.get_case_channel(ctx.channel.id)
if user_channel:
confirm = await Confirm(f"**Are you sure you want to allow {member.mention} as a witness in this case channel, {ctx.author.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not allowing them, then!**")
channel = discord.utils.get(ctx.guild.channels, id=user_channel[0][1])
try:
await channel.set_permissions(
member, read_messages=True, send_messages=True, connect=True, speak=True, view_channel=True)
except Exception:
pass
return await ctx.send(f"**{member.mention} has been allowed here!**")
else:
await ctx.send(f"**This is not a case channel, {ctx.author.mention}!**")
@commands.command(aliases=['forbid_case', 'delete_witness', 'remove_witness', 'fw'])
@commands.has_any_role(*allowed_roles)
async def forbid_witness(self, ctx, member: discord.Member = None):
""" Forbids a witness from a case channel.
:param member: The member to forbid. """
if not member:
return await ctx.send("**Inform a witness to forbid!**")
user_channel = await self.get_case_channel(ctx.channel.id)
if user_channel:
confirm = await Confirm(f"**Are you sure you want to forbid {member.mention} from being a witness in this case channel, {ctx.author.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not forbidding them, then!**")
channel = discord.utils.get(ctx.guild.channels, id=user_channel[0][1])
try:
await channel.set_permissions(
member, read_messages=False, send_messages=False, connect=False, speak=False, view_channel=False)
except Exception:
pass
return await ctx.send(f"**{member.mention} has been forbidden here!**")
else:
await ctx.send(f"**This is not a case channel, {ctx.author.mention}!**")
@commands.command(aliases=['delete_channel', 'archive'])
@commands.has_any_role(*allowed_roles)
async def close_channel(self, ctx):
""" (MOD) Closes a Case-Channel. """
user_channel = await self.get_case_channel(ctx.channel.id)
if not user_channel:
return await ctx.send(f"**What do you think that you are doing? You cannot delete this channel, {ctx.author.mention}!**")
channel = discord.utils.get(ctx.guild.text_channels, id=user_channel[0][1])
embed = discord.Embed(title="Confirmation",
description="Are you sure that you want to delete this channel?",
color=ctx.author.color,
timestamp=ctx.message.created_at)
confirmation = await ctx.send(content=ctx.author.mention, embed=embed)
await confirmation.add_reaction('✅')
await confirmation.add_reaction('❌')
try:
reaction, user = await self.client.wait_for('reaction_add', timeout=20,
check=lambda r, u: u == ctx.author and r.message.channel == ctx.channel and str(r.emoji) in ['✅', '❌'])
except asyncio.TimeoutError:
embed = discord.Embed(title="Confirmation",
description="You took too long to answer the question; not deleting it!",
color=discord.Color.red(),
timestamp=ctx.message.created_at)
return await confirmation.edit(content=ctx.author.mention, embed=embed)
else:
if str(reaction.emoji) == '✅':
embed.description = f"**Channel {ctx.channel.mention} is being deleted...**"
await confirmation.edit(content=ctx.author.mention, embed=embed)
await asyncio.sleep(3)
await channel.delete()
await self.remove_user_open_channel(user_channel[0][0])
else:
embed.description = "Not deleting it!"
await confirmation.edit(content='', embed=embed)
async def dnk_embed(self, member):
def check(r, u):
return u == member and str(r.message.id) == str(the_msg.id) and str(r.emoji) in ['⬅️', '➡️']
command_index = 0
initial_embed = discord.Embed(title="__Table of Commands and their Prices__",
description="These are a few of commands and features that DNK can do.",
color=discord.Color.blue())
the_msg = await member.send(embed=initial_embed)
await the_msg.add_reaction('⬅️')
await the_msg.add_reaction('➡️')
while True:
embed = discord.Embed(title=f"__Table of Commands and their Prices__ ({command_index+1}/{len(list_of_commands)})",
description="These are a few of commands and features that DNK can do.",
color=discord.Color.blue())
embed.add_field(name=list_of_commands[command_index][0],
value=list_of_commands[command_index][1])
await the_msg.edit(embed=embed)
try:
pending_tasks = [self.client.wait_for('reaction_add', check=check),
self.client.wait_for('reaction_remove', check=check)]
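# Race reaction_add against reaction_remove so paging also works when the
# user toggles an arrow reaction off instead of adding a new one.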
done_tasks, pending_tasks = await asyncio.wait(pending_tasks, timeout=60, return_when=asyncio.FIRST_COMPLETED)
if not done_tasks:
raise asyncio.TimeoutError
for task in pending_tasks:
task.cancel()
except asyncio.TimeoutError:
await the_msg.remove_reaction('⬅️', self.client.user)
await the_msg.remove_reaction('➡️', self.client.user)
break
else:
for task in done_tasks:
reaction, user = await task
if str(reaction.emoji) == "➡️":
# await the_msg.remove_reaction(reaction.emoji, member)
if command_index < (len(list_of_commands) - 1):
command_index += 1
continue
elif str(reaction.emoji) == "⬅️":
# await the_msg.remove_reaction(reaction.emoji, member)
if command_index > 0:
command_index -= 1
continue
# Discord methods
async def create_interview_room(self, guild: discord.Guild, app: List[str]) -> None:
""" Creates an interview room for the given application.
:param guild: The server in which the interview will be.
:param app: The applicant info. """
applicant = discord.utils.get(guild.members, id=app[1])
interview_info = self.interview_info.get(app[2])
# Create Private Thread for the user
app_parent = self.client.get_channel(interview_info['parent'])
# NOTE: in production the thread is created without a starter message;
# in a development environment you may need to send one first.
message = None
# message = await app_parent.send('Uncomment this in your development environment')
txt_channel = await app_parent.create_thread(name=f"{applicant.display_name}'s-interview", message=message, reason=f"{app[2].title()} Interview Room")
# Add permissions for the user in the interview room
parent_channel = self.client.get_channel(interview_info['parent'])
interview_vc = self.client.get_channel(interview_info['interview'])
# Updates the applicant's application in the database, adding the channels ids
await self.update_application(applicant.id, txt_channel.id, interview_vc.id, app[2])
# Set channel perms for the user.
await parent_channel.set_permissions(applicant, read_messages=True, send_messages=False, view_channel=True)
await interview_vc.set_permissions(applicant, speak=True, connect=True, view_channel=True)
app_embed = discord.Embed(
title=f"{applicant.name}'s Interview",
description=f"""
Hello, {applicant.mention}, we have received and reviewed your `{app[2].title().replace('_', ' ')}` application. In order to explain how our system works we have to schedule a voice conversation with you.
When would be the best time to talk to one of our staff?""",
color=applicant.color)
formatted_pings = await self.format_application_pings(guild, interview_info['pings'])
await txt_channel.send(content=f"{formatted_pings}, {applicant.mention}", embed=app_embed)
# In-game commands
@commands.command()
@commands.has_permissions(administrator=True)
async def close_app(self, ctx) -> None:
""" (ADMIN) Closes an application channel. """
member = ctx.author
channel = ctx.channel
guild = ctx.guild
if not (app := await self.get_application_by_channel(channel.id)):
return await ctx.send(f"**This is not an application channel, {member.mention}!**")
interview_info = self.interview_info[app[2]]
all_apps_channel = discord.utils.get(guild.text_channels, id=interview_info['app'])
confirm = await Confirm(f"**Are you sure that you want to delete this application channel, {member.mention}?**").prompt(ctx)
if not confirm:
return await ctx.send(f"**Not deleting it, then, {member.mention}!**")
applicant = guild.get_member(app[1])
parent_channel = discord.utils.get(guild.text_channels, id=interview_info['parent'])
interview_vc = discord.utils.get(guild.voice_channels, id=interview_info['interview'])
try:
await parent_channel.set_permissions(applicant, overwrite=None)
await interview_vc.set_permissions(applicant, overwrite=None)
except Exception:
pass  # permissions may already be gone, e.g. if the applicant left the guild
await channel.delete()
await self.delete_application(app[0])
try:
msg = await all_apps_channel.fetch_message(app[0])
await msg.add_reaction('🔒')
except Exception:
pass  # the application message may have been deleted already
async def audio(self, member: discord.Member, audio_name: str) -> None:
""" Plays an audio.
:param member: A member to get guild context from.
:param audio_name: The name of the audio to play. """
# Resolves bot's channel state
staff_vc = self.client.get_channel(staff_vc_id)
bot_state = member.guild.voice_client
try:
if bot_state and bot_state.channel and bot_state.channel != staff_vc:
# move_to() only works while connected, so don't disconnect first
await bot_state.move_to(staff_vc)
elif not bot_state:
voicechannel = discord.utils.get(member.guild.channels, id=staff_vc.id)
await voicechannel.connect()
await asyncio.sleep(2)
voice_client: discord.VoiceClient = discord.utils.get(self.client.voice_clients, guild=member.guild)
# Plays / and they don't stop commin' /
if voice_client and not voice_client.is_playing():
audio_source = discord.FFmpegPCMAudio(f'tts/{audio_name}.mp3')
voice_client.play(audio_source, after=lambda e: print("Finished Warning Staff!"))
else:
print("couldn't play it!")
except Exception as e:
print(e)
return
@commands.command(aliases=['make_report_msg', 'reportmsg', 'report_msg', 'supportmsg', 'support_msg'])
@commands.has_permissions(administrator=True)
async def make_report_support_message(self, ctx) -> None:
""" (ADM) Makes a Report-Support message. """
guild = ctx.guild
embed = discord.Embed(
title="__Report-Support Section__",
description="""Welcome to the Report-Support section, here you can easily find your way into things and/or get help with whatever problem you may be experiencing.""",
color=ctx.author.color,
timestamp=ctx.message.created_at,
url="https://thelanguagesloth.com"
)
embed.set_author(name=self.client.user.display_name, url=self.client.user.display_avatar, icon_url=self.client.user.display_avatar)
embed.set_thumbnail(url=guild.icon.url)
embed.set_footer(text=guild.name, icon_url=guild.icon.url)
view = ReportSupportView(self.client)
await ctx.send("\u200b", embed=embed, view=view)
self.client.add_view(view=view)
async def setup(client):
await client.add_cog(ReportSupport(client))
|
from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO
from wasabi import Printer
import tqdm
import sys
from ..util import registry
from .. import util
from ..errors import Errors
if TYPE_CHECKING:
from ..language import Language # noqa: F401
def setup_table(
*, cols: List[str], widths: List[int], max_width: int = 13
) -> Tuple[List[str], List[int], List[str]]:
final_cols = []
final_widths = []
for col, width in zip(cols, widths):
if len(col) > max_width:
col = col[: max_width - 3] + "..." # shorten column if too long
final_cols.append(col.upper())
final_widths.append(max(len(col), width))
return final_cols, final_widths, ["r" for _ in final_widths]
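# Illustrative example (assumed values, not from the original file):
#   setup_table(cols=["E", "#", "Loss ner"], widths=[3, 6, 8])
# returns (["E", "#", "LOSS NER"], [3, 6, 8], ["r", "r", "r"]).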
@registry.loggers("spacy.ConsoleLogger.v1")
def console_logger(progress_bar: bool = False):
def setup_printer(
nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]:
write = lambda text: stdout.write(f"{text}\n")
msg = Printer(no_print=True)
# ensure that only trainable components are logged
logged_pipes = [
name
for name, proc in nlp.pipeline
if hasattr(proc, "is_trainable") and proc.is_trainable
]
eval_frequency = nlp.config["training"]["eval_frequency"]
score_weights = nlp.config["training"]["score_weights"]
score_cols = [col for col, value in score_weights.items() if value is not None]
loss_cols = [f"Loss {pipe}" for pipe in logged_pipes]
spacing = 2
table_header, table_widths, table_aligns = setup_table(
cols=["E", "#"] + loss_cols + score_cols + ["Score"],
widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6],
)
write(msg.row(table_header, widths=table_widths, spacing=spacing))
write(msg.row(["-" * width for width in table_widths], spacing=spacing))
progress = None
def log_step(info: Optional[Dict[str, Any]]) -> None:
nonlocal progress
if info is None:
# If we don't have a new checkpoint, just return.
if progress is not None:
progress.update(1)
return
losses = [
"{0:.2f}".format(float(info["losses"][pipe_name]))
for pipe_name in logged_pipes
]
scores = []
for col in score_cols:
score = info["other_scores"].get(col, 0.0)
try:
score = float(score)
except TypeError:
err = Errors.E916.format(name=col, score_type=type(score))
raise ValueError(err) from None
if col != "speed":
score *= 100
scores.append("{0:.2f}".format(score))
data = (
[info["epoch"], info["step"]]
+ losses
+ scores
+ ["{0:.2f}".format(float(info["score"]))]
)
if progress is not None:
progress.close()
write(
msg.row(data, widths=table_widths, aligns=table_aligns, spacing=spacing)
)
if progress_bar:
# Set disable=None, so that it disables on non-TTY
progress = tqdm.tqdm(
total=eval_frequency, disable=None, leave=False, file=stderr
)
progress.set_description(f"Epoch {info["epoch"]+1}")
def finalize() -> None:
pass
return log_step, finalize
return setup_printer
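# Usage sketch (assumed training config, for illustration only):
#   [training.logger]
#   @loggers = "spacy.ConsoleLogger.v1"
#   progress_bar = true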
@registry.loggers("spacy.WandbLogger.v2")
def wandb_logger(
project_name: str,
remove_config_values: List[str] = [],
model_log_interval: Optional[int] = None,
log_dataset_dir: Optional[str] = None,
):
try:
import wandb
# test that these are available
from wandb import init, log, join # noqa: F401
except ImportError:
raise ImportError(Errors.E880)
console = console_logger(progress_bar=False)
def setup_logger(
nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
) -> Tuple[Callable[[Dict[str, Any]], None], Callable[[], None]]:
config = nlp.config.interpolate()
config_dot = util.dict_to_dot(config)
for field in remove_config_values:
del config_dot[field]
config = util.dot_to_dict(config_dot)
run = wandb.init(project=project_name, config=config, reinit=True)
console_log_step, console_finalize = console(nlp, stdout, stderr)
def log_dir_artifact(
path: str,
name: str,
type: str,
metadata: Optional[Dict[str, Any]] = {},
aliases: Optional[List[str]] = [],
):
dataset_artifact = wandb.Artifact(name, type=type, metadata=metadata)
dataset_artifact.add_dir(path, name=name)
wandb.log_artifact(dataset_artifact, aliases=aliases)
if log_dataset_dir:
log_dir_artifact(path=log_dataset_dir, name="dataset", type="dataset")
def log_step(info: Optional[Dict[str, Any]]):
console_log_step(info)
if info is not None:
score = info["score"]
other_scores = info["other_scores"]
losses = info["losses"]
wandb.log({"score": score})
if losses:
wandb.log({f"loss_{k}": v for k, v in losses.items()})
if isinstance(other_scores, dict):
wandb.log(other_scores)
if model_log_interval and info.get("output_path"):
if info["step"] % model_log_interval == 0 and info["step"] != 0:
log_dir_artifact(
path=info["output_path"],
name="pipeline_" + run.id,
type="checkpoint",
metadata=info,
aliases=[
f"epoch {info["epoch"]} step {info["step"]}",
"latest",
"best"
if info["score"] == max(info["checkpoints"])[0]
else "",
],
)
def finalize() -> None:
console_finalize()
wandb.join()
return log_step, finalize
return setup_logger
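# Usage sketch (assumed training config, for illustration only):
#   [training.logger]
#   @loggers = "spacy.WandbLogger.v2"
#   project_name = "my_project"
#   remove_config_values = ["paths.train", "paths.dev"]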
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Generator
from dataclasses import asdict, dataclass, replace
from functools import partial, wraps
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
import torch
from torchmetrics import Metric
from typing_extensions import TypedDict
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.apply_func import apply_to_collection, apply_to_collections, move_data_to_device
from pytorch_lightning.utilities.data import extract_batch_size
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.metrics import metrics_to_scalars
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.warnings import WarningCache
_IN_METRIC = Union[Metric, torch.Tensor] # Do not include scalars as they were converted to tensors
_OUT_METRIC = Union[torch.Tensor, Dict[str, torch.Tensor]]
_PBAR_METRIC = Union[float, Dict[str, float]]
_OUT_DICT = Dict[str, _OUT_METRIC]
_PBAR_DICT = Dict[str, _PBAR_METRIC]
class _METRICS(TypedDict):
callback: _OUT_DICT
log: _OUT_DICT
pbar: _PBAR_DICT
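# The three buckets: `callback` feeds `trainer.callback_metrics`, `log` goes to
# the attached loggers, and `pbar` is rendered in the progress bar.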
warning_cache = WarningCache()
@dataclass
class _Sync:
fn: Optional[Callable] = None
_should: bool = False
rank_zero_only: bool = False
_op: Optional[str] = None
_group: Optional[Any] = None
def __post_init__(self) -> None:
self._generate_sync_fn()
@property
def should(self) -> bool:
return self._should
@should.setter
def should(self, should: bool) -> None:
self._should = should
# `self._fn` needs to be re-generated.
self._generate_sync_fn()
@property
def op(self) -> Optional[str]:
return self._op
@op.setter
def op(self, op: Optional[str]) -> None:
self._op = op
# `self._fn` needs to be re-generated.
self._generate_sync_fn()
@property
def group(self) -> Optional[Any]:
return self._group
@group.setter
def group(self, group: Optional[Any]) -> None:
self._group = group
# `self._fn` needs to be re-generated.
self._generate_sync_fn()
def _generate_sync_fn(self) -> None:
"""Used to compute the syncing function and cache it."""
fn = self.no_op if self.fn is None or not self.should or self.rank_zero_only else self.fn
# save the function as `_fn` as the meta are being re-created and the object references need to match.
# ignore typing, bad support for `partial`: mypy/issues/1484
self._fn: Callable = partial(fn, reduce_op=self.op, group=self.group) # type: ignore [arg-type]
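# NOTE: `__call__` below is a property returning the cached partial, so calling
# a `_Sync` instance dispatches straight to `self._fn` without rebuilding the
# partial on every invocation.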
@property
def __call__(self) -> Any:
return self._fn
@staticmethod
def no_op(value: Any, *_: Any, **__: Any) -> Any:
return value
@dataclass
class _Metadata:
fx: str
name: str
prog_bar: bool = False
logger: bool = True
on_step: bool = False
on_epoch: bool = True
reduce_fx: Callable = torch.mean
enable_graph: bool = False
add_dataloader_idx: bool = True
dataloader_idx: Optional[int] = None
metric_attribute: Optional[str] = None
_sync: Optional[_Sync] = None
def __post_init__(self) -> None:
if not self.on_step and not self.on_epoch:
raise MisconfigurationException("`self.log(on_step=False, on_epoch=False)` is not useful.")
self._parse_reduce_fx()
def _parse_reduce_fx(self) -> None:
error = (
"Only `self.log(..., reduce_fx={min,max,mean,sum})` are currently supported."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`."
f" Found: {self.reduce_fx}"
)
if isinstance(self.reduce_fx, str):
reduce_fx = self.reduce_fx.lower()
if reduce_fx == "avg":
reduce_fx = "mean"
if reduce_fx not in ("min", "max", "mean", "sum"):
raise MisconfigurationException(error)
self.reduce_fx = getattr(torch, reduce_fx)
elif self.is_custom_reduction:
raise MisconfigurationException(error)
@property
def sync(self) -> _Sync:
assert self._sync is not None
return self._sync
@sync.setter
def sync(self, sync: _Sync) -> None:
if sync.op is None:
sync.op = self.reduce_fx.__name__
self._sync = sync
@property
def forked(self) -> bool:
return self.on_step and self.on_epoch
def forked_name(self, on_step: bool) -> str:
if self.forked:
return f"{self.name}_{'step' if on_step else 'epoch'}"
return self.name
@property
def is_mean_reduction(self) -> bool:
return self.reduce_fx is torch.mean
@property
def is_sum_reduction(self) -> bool:
return self.reduce_fx in (torch.sum, sum)
@property
def is_max_reduction(self) -> bool:
return self.reduce_fx in (torch.max, max)
@property
def is_min_reduction(self) -> bool:
return self.reduce_fx in (torch.min, min)
@property
def is_custom_reduction(self) -> bool:
return not (self.is_mean_reduction or self.is_max_reduction or self.is_min_reduction or self.is_sum_reduction)
def __getstate__(self) -> dict:
# drop the `sync.fn` to avoid potential pickle errors
# need to drop `fn` first otherwise `asdict` produces a `RecursionError`
copy = replace(self, _sync=replace(self.sync, fn=None))
d = asdict(copy)
# delete the `None` value so it does not override
del d["_sync"]["fn"]
return d
def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
d = {**state, "_sync": _Sync(**state["_sync"], fn=sync_fn)}
self.__dict__.update(d)
@classmethod
def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_Metadata":
meta = cls(state["fx"], state["name"])
meta.__setstate__(state, sync_fn=sync_fn)
return meta
class _ResultMetric(Metric, DeviceDtypeModuleMixin):
"""Wraps the value provided to `:meth:`~pytorch_lightning.core.lightning.LightningModule.log`"""
def __init__(self, metadata: _Metadata, is_tensor: bool) -> None:
super().__init__()
self.is_tensor = is_tensor
self.meta = metadata
self.has_reset = False
if is_tensor:
if metadata.is_max_reduction:
default = float("-inf")
elif metadata.is_min_reduction:
default = float("inf")
else:
default = 0.0
# do not set a dtype in case the default dtype was changed
self.add_state("value", torch.tensor(default), dist_reduce_fx=torch.sum)
if self.meta.is_mean_reduction:
self.cumulated_batch_size: torch.Tensor
self.add_state("cumulated_batch_size", torch.tensor(0), dist_reduce_fx=torch.sum)
# this is defined here only because upstream is missing the type annotation
self._forward_cache: Optional[Any] = None
def update(self, value: _IN_METRIC, batch_size: int) -> None: # type: ignore[override]
if self.is_tensor:
value = cast(torch.Tensor, value)
if not torch.is_floating_point(value):
dtype = torch.get_default_dtype()
warning_cache.warn(
# do not include the value to avoid cache misses
f"You called `self.log({self.meta.name!r}, ...)` in your `{self.meta.fx}` but the value needs to"
f" be floating point. Converting it to {dtype}."
)
value = value.to(dtype)
if self.meta.on_step:
self._forward_cache = self.meta.sync(value.clone()) # `clone` because `sync` is in-place
# performance: no need to accumulate on values only logged on_step
if not self.meta.on_epoch:
self.value = self._forward_cache
return
# perform accumulation with reduction
if self.meta.is_mean_reduction:
# do not use `+=` as it doesn't do type promotion
self.value = self.value + value.mean() * batch_size
self.cumulated_batch_size = self.cumulated_batch_size + batch_size
elif self.meta.is_max_reduction or self.meta.is_min_reduction:
self.value = self.meta.reduce_fx(self.value, value.mean())
elif self.meta.is_sum_reduction:
self.value = self.value + value.mean()
else:
value = cast(Metric, value)
self.value = value
self._forward_cache = value._forward_cache
def compute(self) -> torch.Tensor:
if self.is_tensor:
value = self.meta.sync(self.value)
if self.meta.is_mean_reduction:
cumulated_batch_size = self.meta.sync(self.cumulated_batch_size)
return value / cumulated_batch_size
return value
return self.value.compute()
def reset(self) -> None:
if self.is_tensor:
super().reset()
else:
self.value.reset()
self.has_reset = True
def forward(self, value: _IN_METRIC, batch_size: int) -> None:
if self.meta.enable_graph:
with torch.no_grad():
self.update(value, batch_size)
else:
# performance: skip the `torch.no_grad` context manager by calling `update` directly
self.update(value, batch_size)
def _wrap_compute(self, compute: Any) -> Any:
# Override to avoid syncing - we handle it ourselves.
@wraps(compute)
def wrapped_func(*args: Any, **kwargs: Any) -> Optional[Any]:
if not self._update_called:
rank_zero_warn(
f"The ``compute`` method of metric {self.__class__.__name__}"
" was called before the ``update`` method which may lead to errors,"
" as metric states have not yet been updated.",
)
# return cached value
if self._computed is not None:
return self._computed
self._computed = compute(*args, **kwargs)
return self._computed
return wrapped_func
def __setattr__(self, key: str, value: Any) -> None:
# performance: skip the `torch.nn.Module.__setattr__` checks
object.__setattr__(self, key, value)
def __repr__(self) -> str:
state = f"{repr(self.meta.name)}, value={self.value}"
if self.is_tensor and self.meta.is_mean_reduction:
state += f", cumulated_batch_size={self.cumulated_batch_size}"
return f"{self.__class__.__name__}({state})"
def __getstate__(self, drop_value: bool = False) -> dict:
skip = ["update", "compute", "_update_signature", "_cache"]
if not self.is_tensor and drop_value:
# Avoid serializing ResultMetrics which are passed Metrics
skip.append("value")
d = {k: v for k, v in self.__dict__.items() if k not in skip}
d["meta"] = d["meta"].__getstate__()
d["_class"] = self.__class__.__name__
d["_is_synced"] = False # don't consider the state as synced on reload
return d
def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
d = {**state, "meta": _Metadata._reconstruct(state["meta"], sync_fn=sync_fn)}
super().__setstate__(d)
@classmethod
def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetric":
# need to reconstruct twice because `meta` is used in `__init__`
meta = _Metadata._reconstruct(state["meta"])
result_metric = cls(meta, state["is_tensor"])
result_metric.__setstate__(state, sync_fn=sync_fn)
return result_metric
def to(self, *args: Any, **kwargs: Any) -> "_ResultMetric":
self.__dict__.update(
apply_to_collection(self.__dict__, (torch.Tensor, Metric), move_data_to_device, *args, **kwargs)
)
return self
class _ResultMetricCollection(dict):
"""Dict wrapper for easy access to metadata.
All of the leaf items should be instances of
:class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetric`
with the same metadata.
"""
@property
def meta(self) -> _Metadata:
return next(iter(self.values())).meta
@property
def has_tensor(self) -> bool:
return any(v.is_tensor for v in self.values())
def __getstate__(self, drop_value: bool = False) -> dict:
def getstate(item: _ResultMetric) -> dict:
return item.__getstate__(drop_value=drop_value)
items = apply_to_collection(dict(self), _ResultMetric, getstate)
return {"items": items, "meta": self.meta.__getstate__(), "_class": self.__class__.__name__}
def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
# can't use `apply_to_collection` as it does not recurse items of the same type
items = {k: _ResultMetric._reconstruct(v, sync_fn=sync_fn) for k, v in state["items"].items()}
self.update(items)
@classmethod
def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetricCollection":
rmc = cls()
rmc.__setstate__(state, sync_fn=sync_fn)
return rmc
_METRIC_COLLECTION = Union[_IN_METRIC, _ResultMetricCollection]
class _ResultCollection(dict):
"""
Collection (dictionary) of :class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetric` or
:class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetricCollection`
Example:
# `device` needs to be provided before logging
result = _ResultCollection(training=True, torch.device("cpu"))
# you can log to a specific collection.
# arguments: fx, key, value, metadata
result.log('training_step', 'acc', torch.tensor(...), on_step=True, on_epoch=True)
result.log('validation_step', 'recall', torch.tensor(...), on_step=True, on_epoch=True)
"""
DATALOADER_SUFFIX = "/dataloader_idx_{}"
def __init__(self, training: bool, device: Optional[Union[str, torch.device]] = None) -> None:
super().__init__()
self.training = training
self.device: Optional[Union[str, torch.device]] = device
self.batch: Optional[Any] = None
self.batch_size: Optional[int] = None
self.dataloader_idx: Optional[int] = None
@property
def result_metrics(self) -> List[_ResultMetric]:
o = []
def append_fn(v: _ResultMetric) -> None:
nonlocal o
o.append(v)
apply_to_collection(list(self.values()), _ResultMetric, append_fn)
return o
def _extract_batch_size(
self, value: Union[_ResultMetric, _ResultMetricCollection], batch_size: Optional[int], meta: _Metadata
) -> int:
# check if we have extracted the batch size already
if batch_size is None:
batch_size = self.batch_size
if batch_size is not None:
return batch_size
batch_size = 1
is_tensor = value.is_tensor if isinstance(value, _ResultMetric) else value.has_tensor
if self.batch is not None and is_tensor and meta.on_epoch and meta.is_mean_reduction:
batch_size = extract_batch_size(self.batch)
self.batch_size = batch_size
return batch_size
def log(
self,
fx: str,
name: str,
value: _METRIC_COLLECTION,
prog_bar: bool = False,
logger: bool = True,
on_step: bool = False,
on_epoch: bool = True,
reduce_fx: Callable = torch.mean,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_fn: Callable = _Sync.no_op,
sync_dist_group: Optional[Any] = None,
add_dataloader_idx: bool = True,
batch_size: Optional[int] = None,
metric_attribute: Optional[str] = None,
rank_zero_only: bool = False,
) -> None:
"""See :meth:`~pytorch_lightning.core.lightning.LightningModule.log`"""
# no metrics should be logged with graphs
if not enable_graph:
value = recursive_detach(value)
# move metrics to cpu on TPU.
if isinstance(value, torch.Tensor) and value.device.type == "xla":
value = value.cpu()
# storage key
key = f"{fx}.{name}"
# add dataloader_suffix to both key and fx
if add_dataloader_idx and self.dataloader_idx is not None:
key += f".{self.dataloader_idx}"
fx += f".{self.dataloader_idx}"
meta = _Metadata(
fx=fx,
name=name,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
add_dataloader_idx=add_dataloader_idx,
dataloader_idx=self.dataloader_idx,
metric_attribute=metric_attribute,
)
meta.sync = _Sync(_should=sync_dist, fn=sync_dist_fn, _group=sync_dist_group, rank_zero_only=rank_zero_only)
# register logged value if it doesn't exist
if key not in self:
self.register_key(key, meta, value)
# check the stored metadata and the current one match
elif meta != self[key].meta:
raise MisconfigurationException(
f"You called `self.log({name}, ...)` twice in `{fx}` with different arguments. This is not allowed"
)
batch_size = self._extract_batch_size(self[key], batch_size, meta)
self.update_metrics(key, value, batch_size)
def register_key(self, key: str, meta: _Metadata, value: _METRIC_COLLECTION) -> None:
"""Create one _ResultMetric object per value.
Value can be provided as a nested collection
"""
def fn(v: _IN_METRIC) -> _ResultMetric:
metric = _ResultMetric(meta, isinstance(v, torch.Tensor))
return metric.to(self.device)
value = apply_to_collection(value, (torch.Tensor, Metric), fn)
if isinstance(value, dict):
value = _ResultMetricCollection(value)
self[key] = value
def update_metrics(self, key: str, value: _METRIC_COLLECTION, batch_size: int) -> None:
def fn(result_metric: _ResultMetric, v: torch.Tensor) -> None:
# performance: avoid calling `__call__` to avoid the checks in `torch.nn.Module._call_impl`
result_metric.forward(v.to(self.device), batch_size)
result_metric.has_reset = False
apply_to_collections(self[key], value, _ResultMetric, fn)
@staticmethod
def _get_cache(result_metric: _ResultMetric, on_step: bool) -> Optional[torch.Tensor]:
cache = None
if on_step and result_metric.meta.on_step:
cache = result_metric._forward_cache
elif not on_step and result_metric.meta.on_epoch:
if result_metric._computed is None:
# always reduce on epoch end
should = result_metric.meta.sync.should
result_metric.meta.sync.should = True
result_metric.compute()
result_metric.meta.sync.should = should
cache = result_metric._computed
if cache is not None and not result_metric.meta.enable_graph:
return cache.detach()
return cache
def valid_items(self) -> Generator:
"""This function is used to iterate over current valid metrics."""
return (
(k, v)
for k, v in self.items()
if not (isinstance(v, _ResultMetric) and v.has_reset) and self.dataloader_idx == v.meta.dataloader_idx
)
def _forked_name(self, result_metric: _ResultMetric, on_step: bool) -> Tuple[str, str]:
name = result_metric.meta.name
forked_name = result_metric.meta.forked_name(on_step)
add_dataloader_idx = result_metric.meta.add_dataloader_idx
dl_idx = result_metric.meta.dataloader_idx
if add_dataloader_idx and dl_idx is not None:
dataloader_suffix = self.DATALOADER_SUFFIX.format(dl_idx)
name += dataloader_suffix
forked_name += dataloader_suffix
return name, forked_name
def metrics(self, on_step: bool) -> _METRICS:
metrics = _METRICS(callback={}, log={}, pbar={})
for _, result_metric in self.valid_items():
# extract forward_cache or computed from the _ResultMetric. ignore when the output is None
value = apply_to_collection(result_metric, _ResultMetric, self._get_cache, on_step, include_none=False)
# convert metric collection to dict container.
if isinstance(value, _ResultMetricCollection):
value = dict(value.items())
# check if the collection is empty
has_tensor = False
def any_tensor(_: Any) -> None:
nonlocal has_tensor
has_tensor = True
apply_to_collection(value, torch.Tensor, any_tensor)
if not has_tensor:
continue
name, forked_name = self._forked_name(result_metric, on_step)
# populate logging metrics
if result_metric.meta.logger:
metrics["log"][forked_name] = value
# populate callback metrics. callback metrics don't take `_step` forked metrics
if self.training or (result_metric.meta.on_epoch and not on_step):
metrics["callback"][name] = value
metrics["callback"][forked_name] = value
# populate progress_bar metrics. convert tensors to numbers
if result_metric.meta.prog_bar:
metrics["pbar"][forked_name] = metrics_to_scalars(value)
return metrics
def reset(self, metrics: Optional[bool] = None, fx: Optional[str] = None) -> None:
"""Reset the result collection.
Args:
metrics: If True, only ``torchmetrics.Metric`` results are reset,
if False, only ``torch.Tensors`` are reset,
if ``None``, both are.
fx: Function to reset
"""
def fn(item: _ResultMetric) -> None:
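# `metrics ^ item.is_tensor` selects plain tensors when metrics=False and
# torchmetrics.Metric results when metrics=True; `metrics is None` matches both.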
requested_type = metrics is None or metrics ^ item.is_tensor
same_fx = fx is None or fx == item.meta.fx
if requested_type and same_fx:
item.reset()
apply_to_collection(self, _ResultMetric, fn)
def to(self, *args: Any, **kwargs: Any) -> "_ResultCollection":
"""Move all data to the given device."""
self.update(apply_to_collection(dict(self), (torch.Tensor, Metric), move_data_to_device, *args, **kwargs))
if "device" in kwargs:
self.device = kwargs["device"]
return self
def cpu(self) -> "_ResultCollection":
"""Move all data to CPU."""
return self.to(device="cpu")
def sync(self) -> None:
for result_metric in self.result_metrics:
if result_metric.is_tensor and not result_metric._is_synced:
result_metric.sync(should_sync=not result_metric.meta.sync.rank_zero_only)
def unsync(self) -> None:
for result_metric in self.result_metrics:
if result_metric.is_tensor and result_metric._is_synced:
result_metric.unsync()
def __str__(self) -> str:
# remove empty values
self_str = str({k: v for k, v in self.items() if v})
return f"{self.__class__.__name__}({self_str})"
def __repr__(self) -> str:
return f"{{{self.training}, {repr(self.device)}, {super().__repr__()}}}"
def __getstate__(self, drop_value: bool = True) -> dict:
d = self.__dict__.copy()
# all the items should be either `_ResultMetric`s or `_ResultMetricCollection`s
items = {k: v.__getstate__(drop_value=drop_value) for k, v in self.items()}
return {**d, "items": items}
def __setstate__(
self, state: dict, map_location: Optional[Union[str, torch.device]] = None, sync_fn: Optional[Callable] = None
) -> None:
self.__dict__.update({k: v for k, v in state.items() if k != "items"})
def setstate(k: str, item: dict) -> Union[_ResultMetric, _ResultMetricCollection]:
if not isinstance(item, dict):
raise ValueError(f"Unexpected value: {item}")
cls = item["_class"]
if cls == _ResultMetric.__name__:
cls = _ResultMetric
elif cls == _ResultMetricCollection.__name__:
cls = _ResultMetricCollection
else:
raise ValueError(f"Unexpected class name: {cls}")
_sync_fn = sync_fn or (self[k].meta.sync.fn if k in self else None)
return cls._reconstruct(item, sync_fn=_sync_fn)
items = {k: setstate(k, v) for k, v in state["items"].items()}
self.update(items)
device = map_location or self.device
self.to(device)
def state_dict(self, drop_value: bool = True) -> dict:
return self.__getstate__(drop_value)
def load_state_dict(
self,
state_dict: dict,
map_location: Optional[Union[str, torch.device]] = None,
sync_fn: Optional[Callable] = None,
metrics: Optional[Dict[str, Metric]] = None,
) -> None:
self.__setstate__(state_dict, map_location=map_location, sync_fn=sync_fn)
if not metrics:
return
# iterate through result metrics and re-attach Metric references on reload.
result_metrics = self.result_metrics
for metric_attribute, metric in metrics.items():
for result_metric in result_metrics:
if result_metric.meta.metric_attribute == metric_attribute:
result_metric.value = metric
| # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Generator
from dataclasses import asdict, dataclass, replace
from functools import partial, wraps
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
import torch
from torchmetrics import Metric
from typing_extensions import TypedDict
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.apply_func import apply_to_collection, apply_to_collections, move_data_to_device
from pytorch_lightning.utilities.data import extract_batch_size
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.metrics import metrics_to_scalars
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.warnings import WarningCache
_IN_METRIC = Union[Metric, torch.Tensor] # Do not include scalars as they were converted to tensors
_OUT_METRIC = Union[torch.Tensor, Dict[str, torch.Tensor]]
_PBAR_METRIC = Union[float, Dict[str, float]]
_OUT_DICT = Dict[str, _OUT_METRIC]
_PBAR_DICT = Dict[str, _PBAR_METRIC]
class _METRICS(TypedDict):
callback: _OUT_DICT
log: _OUT_DICT
pbar: _PBAR_DICT
warning_cache = WarningCache()
@dataclass
class _Sync:
fn: Optional[Callable] = None
_should: bool = False
rank_zero_only: bool = False
_op: Optional[str] = None
_group: Optional[Any] = None
def __post_init__(self) -> None:
self._generate_sync_fn()
@property
def should(self) -> bool:
return self._should
@should.setter
def should(self, should: bool) -> None:
self._should = should
# `self._fn` needs to be re-generated.
self._generate_sync_fn()
@property
def op(self) -> Optional[str]:
return self._op
@op.setter
def op(self, op: Optional[str]) -> None:
self._op = op
# `self._fn` needs to be re-generated.
self._generate_sync_fn()
@property
def group(self) -> Optional[Any]:
return self._group
@group.setter
def group(self, group: Optional[Any]) -> None:
self._group = group
# `self._fn` needs to be re-generated.
self._generate_sync_fn()
def _generate_sync_fn(self) -> None:
"""Used to compute the syncing function and cache it."""
fn = self.no_op if self.fn is None or not self.should or self.rank_zero_only else self.fn
# save the function as `_fn` because the meta is re-created and the object references need to match.
# ignore typing, bad support for `partial`: mypy/issues/1484
self._fn: Callable = partial(fn, reduce_op=self.op, group=self.group) # type: ignore [arg-type]
@property
def __call__(self) -> Any:
return self._fn
@staticmethod
def no_op(value: Any, *_: Any, **__: Any) -> Any:
return value
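# Illustrative sketch of the caching above: every setter re-generates `_fn`, so
# flipping `should` swaps between the real reduce fn and `no_op`. `my_reduce` is
# a hypothetical stand-in for a distributed reduce, not part of this module.
#
#   >>> def my_reduce(value, reduce_op=None, group=None):
#   ...     return value * 2  # pretend this is an all-reduce
#   >>> sync = _Sync(fn=my_reduce, _should=True, _op="sum")
#   >>> sync(torch.tensor(1.0))  # dispatches through partial(my_reduce, reduce_op="sum", ...)
#   tensor(2.)
#   >>> sync.should = False      # setter triggers _generate_sync_fn()
#   >>> sync(torch.tensor(1.0))  # now routed to _Sync.no_op
#   tensor(1.)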
@dataclass
class _Metadata:
fx: str
name: str
prog_bar: bool = False
logger: bool = True
on_step: bool = False
on_epoch: bool = True
reduce_fx: Callable = torch.mean
enable_graph: bool = False
add_dataloader_idx: bool = True
dataloader_idx: Optional[int] = None
metric_attribute: Optional[str] = None
_sync: Optional[_Sync] = None
def __post_init__(self) -> None:
if not self.on_step and not self.on_epoch:
raise MisconfigurationException("`self.log(on_step=False, on_epoch=False)` is not useful.")
self._parse_reduce_fx()
def _parse_reduce_fx(self) -> None:
error = (
"Only `self.log(..., reduce_fx={min,max,mean,sum})` are currently supported."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`."
f" Found: {self.reduce_fx}"
)
if isinstance(self.reduce_fx, str):
reduce_fx = self.reduce_fx.lower()
if reduce_fx == "avg":
reduce_fx = "mean"
if reduce_fx not in ("min", "max", "mean", "sum"):
raise MisconfigurationException(error)
self.reduce_fx = getattr(torch, reduce_fx)
elif self.is_custom_reduction:
raise MisconfigurationException(error)
@property
def sync(self) -> _Sync:
assert self._sync is not None
return self._sync
@sync.setter
def sync(self, sync: _Sync) -> None:
if sync.op is None:
sync.op = self.reduce_fx.__name__
self._sync = sync
@property
def forked(self) -> bool:
return self.on_step and self.on_epoch
def forked_name(self, on_step: bool) -> str:
if self.forked:
return f'{self.name}_{"step" if on_step else "epoch"}'
return self.name
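# For example, with `on_step=True, on_epoch=True` a metric named "train_loss" is
# "forked" into two logged entries while `name` itself stays plain:
#
#   >>> meta = _Metadata(fx="training_step", name="train_loss", on_step=True, on_epoch=True)
#   >>> meta.forked
#   True
#   >>> meta.forked_name(on_step=True), meta.forked_name(on_step=False)
#   ('train_loss_step', 'train_loss_epoch')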
@property
def is_mean_reduction(self) -> bool:
return self.reduce_fx is torch.mean
@property
def is_sum_reduction(self) -> bool:
return self.reduce_fx in (torch.sum, sum)
@property
def is_max_reduction(self) -> bool:
return self.reduce_fx in (torch.max, max)
@property
def is_min_reduction(self) -> bool:
return self.reduce_fx in (torch.min, min)
@property
def is_custom_reduction(self) -> bool:
return not (self.is_mean_reduction or self.is_max_reduction or self.is_min_reduction or self.is_sum_reduction)
def __getstate__(self) -> dict:
# drop the `sync.fn` to avoid potential pickle errors
# need to drop `fn` first otherwise `asdict` produces a `RecursionError`
copy = replace(self, _sync=replace(self.sync, fn=None))
d = asdict(copy)
# delete the `None` value so it does not override
del d["_sync"]["fn"]
return d
def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
d = {**state, "_sync": _Sync(**state["_sync"], fn=sync_fn)}
self.__dict__.update(d)
@classmethod
def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_Metadata":
meta = cls(state["fx"], state["name"])
meta.__setstate__(state, sync_fn=sync_fn)
return meta
class _ResultMetric(Metric, DeviceDtypeModuleMixin):
"""Wraps the value provided to `:meth:`~pytorch_lightning.core.lightning.LightningModule.log`"""
def __init__(self, metadata: _Metadata, is_tensor: bool) -> None:
super().__init__()
self.is_tensor = is_tensor
self.meta = metadata
self.has_reset = False
if is_tensor:
if metadata.is_max_reduction:
default = float("-inf")
elif metadata.is_min_reduction:
default = float("inf")
else:
default = 0.0
# do not set a dtype in case the default dtype was changed
self.add_state("value", torch.tensor(default), dist_reduce_fx=torch.sum)
if self.meta.is_mean_reduction:
self.cumulated_batch_size: torch.Tensor
self.add_state("cumulated_batch_size", torch.tensor(0), dist_reduce_fx=torch.sum)
# this is defined here only because upstream is missing the type annotation
self._forward_cache: Optional[Any] = None
def update(self, value: _IN_METRIC, batch_size: int) -> None: # type: ignore[override]
if self.is_tensor:
value = cast(torch.Tensor, value)
if not torch.is_floating_point(value):
dtype = torch.get_default_dtype()
warning_cache.warn(
# do not include the value to avoid cache misses
f"You called `self.log({self.meta.name!r}, ...)` in your `{self.meta.fx}` but the value needs to"
f" be floating point. Converting it to {dtype}."
)
value = value.to(dtype)
if self.meta.on_step:
self._forward_cache = self.meta.sync(value.clone()) # `clone` because `sync` is in-place
# performance: no need to accumulate on values only logged on_step
if not self.meta.on_epoch:
self.value = self._forward_cache
return
# perform accumulation with reduction
if self.meta.is_mean_reduction:
# do not use `+=` as it doesn't do type promotion
self.value = self.value + value.mean() * batch_size
self.cumulated_batch_size = self.cumulated_batch_size + batch_size
elif self.meta.is_max_reduction or self.meta.is_min_reduction:
self.value = self.meta.reduce_fx(self.value, value.mean())
elif self.meta.is_sum_reduction:
self.value = self.value + value.mean()
else:
value = cast(Metric, value)
self.value = value
self._forward_cache = value._forward_cache
def compute(self) -> torch.Tensor:
if self.is_tensor:
value = self.meta.sync(self.value)
if self.meta.is_mean_reduction:
cumulated_batch_size = self.meta.sync(self.cumulated_batch_size)
return value / cumulated_batch_size
return value
return self.value.compute()
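# Worked example of the mean reduction: `update` accumulates `value.mean() * batch_size`
# plus the batch sizes, and `compute` divides them, yielding a batch-size-weighted mean
# rather than a plain mean of the per-step values:
#
#   step 1: log 0.2 with batch_size=2 -> value = 0.4,  cumulated_batch_size = 2
#   step 2: log 0.5 with batch_size=8 -> value = 4.4,  cumulated_batch_size = 10
#   compute() -> 4.4 / 10 = 0.44   (not (0.2 + 0.5) / 2 = 0.35)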
def reset(self) -> None:
if self.is_tensor:
super().reset()
else:
self.value.reset()
self.has_reset = True
def forward(self, value: _IN_METRIC, batch_size: int) -> None:
if self.meta.enable_graph:
with torch.no_grad():
self.update(value, batch_size)
else:
# performance: skip the `torch.no_grad` context manager by calling `update` directly
self.update(value, batch_size)
def _wrap_compute(self, compute: Any) -> Any:
# Override to avoid syncing - we handle it ourselves.
@wraps(compute)
def wrapped_func(*args: Any, **kwargs: Any) -> Optional[Any]:
if not self._update_called:
rank_zero_warn(
f"The ``compute`` method of metric {self.__class__.__name__}"
" was called before the ``update`` method which may lead to errors,"
" as metric states have not yet been updated.",
)
# return cached value
if self._computed is not None:
return self._computed
self._computed = compute(*args, **kwargs)
return self._computed
return wrapped_func
def __setattr__(self, key: str, value: Any) -> None:
# performance: skip the `torch.nn.Module.__setattr__` checks
object.__setattr__(self, key, value)
def __repr__(self) -> str:
state = f"{repr(self.meta.name)}, value={self.value}"
if self.is_tensor and self.meta.is_mean_reduction:
state += f", cumulated_batch_size={self.cumulated_batch_size}"
return f"{self.__class__.__name__}({state})"
def __getstate__(self, drop_value: bool = False) -> dict:
skip = ["update", "compute", "_update_signature", "_cache"]
if not self.is_tensor and drop_value:
# Avoid serializing ResultMetrics which are passed Metrics
skip.append("value")
d = {k: v for k, v in self.__dict__.items() if k not in skip}
d["meta"] = d["meta"].__getstate__()
d["_class"] = self.__class__.__name__
d["_is_synced"] = False # don't consider the state as synced on reload
return d
def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
d = {**state, "meta": _Metadata._reconstruct(state["meta"], sync_fn=sync_fn)}
super().__setstate__(d)
@classmethod
def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetric":
# need to reconstruct twice because `meta` is used in `__init__`
meta = _Metadata._reconstruct(state["meta"])
result_metric = cls(meta, state["is_tensor"])
result_metric.__setstate__(state, sync_fn=sync_fn)
return result_metric
def to(self, *args: Any, **kwargs: Any) -> "_ResultMetric":
self.__dict__.update(
apply_to_collection(self.__dict__, (torch.Tensor, Metric), move_data_to_device, *args, **kwargs)
)
return self
class _ResultMetricCollection(dict):
"""Dict wrapper for easy access to metadata.
All of the leaf items should be instances of
:class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetric`
with the same metadata.
"""
@property
def meta(self) -> _Metadata:
return next(iter(self.values())).meta
@property
def has_tensor(self) -> bool:
return any(v.is_tensor for v in self.values())
def __getstate__(self, drop_value: bool = False) -> dict:
def getstate(item: _ResultMetric) -> dict:
return item.__getstate__(drop_value=drop_value)
items = apply_to_collection(dict(self), _ResultMetric, getstate)
return {"items": items, "meta": self.meta.__getstate__(), "_class": self.__class__.__name__}
def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
# can't use `apply_to_collection` as it does not recurse items of the same type
items = {k: _ResultMetric._reconstruct(v, sync_fn=sync_fn) for k, v in state["items"].items()}
self.update(items)
@classmethod
def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetricCollection":
rmc = cls()
rmc.__setstate__(state, sync_fn=sync_fn)
return rmc
_METRIC_COLLECTION = Union[_IN_METRIC, _ResultMetricCollection]
class _ResultCollection(dict):
"""
Collection (dictionary) of :class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetric` or
:class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetricCollection`
Example:
# `device` needs to be provided before logging
result = _ResultCollection(training=True, device=torch.device("cpu"))
# you can log to a specific collection.
# arguments: fx, key, value, metadata
result.log('training_step', 'acc', torch.tensor(...), on_step=True, on_epoch=True)
result.log('validation_step', 'recall', torch.tensor(...), on_step=True, on_epoch=True)
"""
DATALOADER_SUFFIX = "/dataloader_idx_{}"
def __init__(self, training: bool, device: Optional[Union[str, torch.device]] = None) -> None:
super().__init__()
self.training = training
self.device: Optional[Union[str, torch.device]] = device
self.batch: Optional[Any] = None
self.batch_size: Optional[int] = None
self.dataloader_idx: Optional[int] = None
@property
def result_metrics(self) -> List[_ResultMetric]:
o = []
def append_fn(v: _ResultMetric) -> None:
nonlocal o
o.append(v)
apply_to_collection(list(self.values()), _ResultMetric, append_fn)
return o
def _extract_batch_size(
self, value: Union[_ResultMetric, _ResultMetricCollection], batch_size: Optional[int], meta: _Metadata
) -> int:
# check if we have extracted the batch size already
if batch_size is None:
batch_size = self.batch_size
if batch_size is not None:
return batch_size
batch_size = 1
is_tensor = value.is_tensor if isinstance(value, _ResultMetric) else value.has_tensor
if self.batch is not None and is_tensor and meta.on_epoch and meta.is_mean_reduction:
batch_size = extract_batch_size(self.batch)
self.batch_size = batch_size
return batch_size
def log(
self,
fx: str,
name: str,
value: _METRIC_COLLECTION,
prog_bar: bool = False,
logger: bool = True,
on_step: bool = False,
on_epoch: bool = True,
reduce_fx: Callable = torch.mean,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_fn: Callable = _Sync.no_op,
sync_dist_group: Optional[Any] = None,
add_dataloader_idx: bool = True,
batch_size: Optional[int] = None,
metric_attribute: Optional[str] = None,
rank_zero_only: bool = False,
) -> None:
"""See :meth:`~pytorch_lightning.core.lightning.LightningModule.log`"""
# no metrics should be logged with graphs
if not enable_graph:
value = recursive_detach(value)
# move metrics to cpu on TPU.
if isinstance(value, torch.Tensor) and value.device.type == "xla":
value = value.cpu()
# storage key
key = f"{fx}.{name}"
# add dataloader_suffix to both key and fx
if add_dataloader_idx and self.dataloader_idx is not None:
key += f".{self.dataloader_idx}"
fx += f".{self.dataloader_idx}"
meta = _Metadata(
fx=fx,
name=name,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
add_dataloader_idx=add_dataloader_idx,
dataloader_idx=self.dataloader_idx,
metric_attribute=metric_attribute,
)
meta.sync = _Sync(_should=sync_dist, fn=sync_dist_fn, _group=sync_dist_group, rank_zero_only=rank_zero_only)
# register logged value if it doesn't exist
if key not in self:
self.register_key(key, meta, value)
# check the stored metadata and the current one match
elif meta != self[key].meta:
raise MisconfigurationException(
f"You called `self.log({name}, ...)` twice in `{fx}` with different arguments. This is not allowed"
)
batch_size = self._extract_batch_size(self[key], batch_size, meta)
self.update_metrics(key, value, batch_size)
def register_key(self, key: str, meta: _Metadata, value: _METRIC_COLLECTION) -> None:
"""Create one _ResultMetric object per value.
Value can be provided as a nested collection
"""
def fn(v: _IN_METRIC) -> _ResultMetric:
metric = _ResultMetric(meta, isinstance(v, torch.Tensor))
return metric.to(self.device)
value = apply_to_collection(value, (torch.Tensor, Metric), fn)
if isinstance(value, dict):
value = _ResultMetricCollection(value)
self[key] = value
def update_metrics(self, key: str, value: _METRIC_COLLECTION, batch_size: int) -> None:
def fn(result_metric: _ResultMetric, v: torch.Tensor) -> None:
# performance: avoid calling `__call__` to avoid the checks in `torch.nn.Module._call_impl`
result_metric.forward(v.to(self.device), batch_size)
result_metric.has_reset = False
apply_to_collections(self[key], value, _ResultMetric, fn)
@staticmethod
def _get_cache(result_metric: _ResultMetric, on_step: bool) -> Optional[torch.Tensor]:
cache = None
if on_step and result_metric.meta.on_step:
cache = result_metric._forward_cache
elif not on_step and result_metric.meta.on_epoch:
if result_metric._computed is None:
# always reduce on epoch end
should = result_metric.meta.sync.should
result_metric.meta.sync.should = True
result_metric.compute()
result_metric.meta.sync.should = should
cache = result_metric._computed
if cache is not None and not result_metric.meta.enable_graph:
return cache.detach()
return cache
def valid_items(self) -> Generator:
"""This function is used to iterate over current valid metrics."""
return (
(k, v)
for k, v in self.items()
if not (isinstance(v, _ResultMetric) and v.has_reset) and self.dataloader_idx == v.meta.dataloader_idx
)
def _forked_name(self, result_metric: _ResultMetric, on_step: bool) -> Tuple[str, str]:
name = result_metric.meta.name
forked_name = result_metric.meta.forked_name(on_step)
add_dataloader_idx = result_metric.meta.add_dataloader_idx
dl_idx = result_metric.meta.dataloader_idx
if add_dataloader_idx and dl_idx is not None:
dataloader_suffix = self.DATALOADER_SUFFIX.format(dl_idx)
name += dataloader_suffix
forked_name += dataloader_suffix
return name, forked_name
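# For example, with `add_dataloader_idx=True` and `dataloader_idx=0`, a forked metric
# logged as "val_acc" comes back as:
#
#   name        -> "val_acc/dataloader_idx_0"
#   forked_name -> "val_acc_epoch/dataloader_idx_0"   (when on_step=False)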
def metrics(self, on_step: bool) -> _METRICS:
metrics = _METRICS(callback={}, log={}, pbar={})
for _, result_metric in self.valid_items():
# extract forward_cache or computed from the _ResultMetric. ignore when the output is None
value = apply_to_collection(result_metric, _ResultMetric, self._get_cache, on_step, include_none=False)
# convert metric collection to dict container.
if isinstance(value, _ResultMetricCollection):
value = dict(value.items())
# check if the collection is empty
has_tensor = False
def any_tensor(_: Any) -> None:
nonlocal has_tensor
has_tensor = True
apply_to_collection(value, torch.Tensor, any_tensor)
if not has_tensor:
continue
name, forked_name = self._forked_name(result_metric, on_step)
# populate logging metrics
if result_metric.meta.logger:
metrics["log"][forked_name] = value
# populate callback metrics. callback metrics don't take `_step` forked metrics
if self.training or (result_metric.meta.on_epoch and not on_step):
metrics["callback"][name] = value
metrics["callback"][forked_name] = value
# populate progress_bar metrics. convert tensors to numbers
if result_metric.meta.prog_bar:
metrics["pbar"][forked_name] = metrics_to_scalars(value)
return metrics
def reset(self, metrics: Optional[bool] = None, fx: Optional[str] = None) -> None:
"""Reset the result collection.
Args:
metrics: If True, only ``torchmetrics.Metric`` results are reset,
if False, only ``torch.Tensors`` are reset,
if ``None``, both are.
fx: Function to reset
"""
def fn(item: _ResultMetric) -> None:
requested_type = metrics is None or metrics ^ item.is_tensor
same_fx = fx is None or fx == item.meta.fx
if requested_type and same_fx:
item.reset()
apply_to_collection(self, _ResultMetric, fn)
def to(self, *args: Any, **kwargs: Any) -> "_ResultCollection":
"""Move all data to the given device."""
self.update(apply_to_collection(dict(self), (torch.Tensor, Metric), move_data_to_device, *args, **kwargs))
if "device" in kwargs:
self.device = kwargs["device"]
return self
def cpu(self) -> "_ResultCollection":
"""Move all data to CPU."""
return self.to(device="cpu")
def sync(self) -> None:
for result_metric in self.result_metrics:
if result_metric.is_tensor and not result_metric._is_synced:
result_metric.sync(should_sync=not result_metric.meta.sync.rank_zero_only)
def unsync(self) -> None:
for result_metric in self.result_metrics:
if result_metric.is_tensor and result_metric._is_synced:
result_metric.unsync()
def __str__(self) -> str:
# remove empty values
self_str = str({k: v for k, v in self.items() if v})
return f"{self.__class__.__name__}({self_str})"
def __repr__(self) -> str:
return f"{{{self.training}, {repr(self.device)}, {super().__repr__()}}}"
def __getstate__(self, drop_value: bool = True) -> dict:
d = self.__dict__.copy()
# all the items should be either `_ResultMetric`s or `_ResultMetricCollection`s
items = {k: v.__getstate__(drop_value=drop_value) for k, v in self.items()}
return {**d, "items": items}
def __setstate__(
self, state: dict, map_location: Optional[Union[str, torch.device]] = None, sync_fn: Optional[Callable] = None
) -> None:
self.__dict__.update({k: v for k, v in state.items() if k != "items"})
def setstate(k: str, item: dict) -> Union[_ResultMetric, _ResultMetricCollection]:
if not isinstance(item, dict):
raise ValueError(f"Unexpected value: {item}")
cls = item["_class"]
if cls == _ResultMetric.__name__:
cls = _ResultMetric
elif cls == _ResultMetricCollection.__name__:
cls = _ResultMetricCollection
else:
raise ValueError(f"Unexpected class name: {cls}")
_sync_fn = sync_fn or (self[k].meta.sync.fn if k in self else None)
return cls._reconstruct(item, sync_fn=_sync_fn)
items = {k: setstate(k, v) for k, v in state["items"].items()}
self.update(items)
device = map_location or self.device
self.to(device)
def state_dict(self, drop_value: bool = True) -> dict:
return self.__getstate__(drop_value)
def load_state_dict(
self,
state_dict: dict,
map_location: Optional[Union[str, torch.device]] = None,
sync_fn: Optional[Callable] = None,
metrics: Optional[Dict[str, Metric]] = None,
) -> None:
self.__setstate__(state_dict, map_location=map_location, sync_fn=sync_fn)
if not metrics:
return
# iterate through result metrics and re-attach Metric references on reload.
result_metrics = self.result_metrics
for metric_attribute, metric in metrics.items():
for result_metric in result_metrics:
if result_metric.meta.metric_attribute == metric_attribute:
result_metric.value = metric
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
from typing import Any
from typing import Dict
from typing import Text
import paddle
from paddle.optimizer import Optimizer
from paddle.regularizer import L2Decay
from paddlespeech.s2t.training.gradclip import ClipGradByGlobalNormWithLog
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.s2t.utils.dynamic_import import instance_class
from paddlespeech.s2t.utils.log import Log
__all__ = ["OptimizerFactory"]
logger = Log(__name__).getlog()
OPTIMIZER_DICT = {
"sgd": "paddle.optimizer:SGD",
"momentum": "paddle.optimizer:Momentum",
"adadelta": "paddle.optimizer:Adadelta",
"adam": "paddle.optimizer:Adam",
"adamw": "paddle.optimizer:AdamW",
}
def register_optimizer(cls):
"""Register optimizer."""
alias = cls.__name__.lower()
OPTIMIZER_DICT[alias] = cls.__module__ + ":" + cls.__name__
return cls
@register_optimizer
class Noam(paddle.optimizer.Adam):
"""Seem to: espnet/nets/pytorch_backend/transformer/optimizer.py """
def __init__(self,
learning_rate=0,
beta1=0.9,
beta2=0.98,
epsilon=1e-9,
parameters=None,
weight_decay=None,
grad_clip=None,
lazy_mode=False,
multi_precision=False,
name=None):
super().__init__(
learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
parameters=parameters,
weight_decay=weight_decay,
grad_clip=grad_clip,
lazy_mode=lazy_mode,
multi_precision=multi_precision,
name=name)
def __repr__(self):
echo = f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}> "
echo += f"learning_rate: {self._learning_rate}, "
echo += f"(beta1: {self._beta1} beta2: {self._beta2}), "
echo += f"epsilon: {self._epsilon}"
def dynamic_import_optimizer(module):
"""Import Optimizer class dynamically.
Args:
module (str): module_name:class_name or alias in `OPTIMIZER_DICT`
Returns:
type: Optimizer class
"""
module_class = dynamic_import(module, OPTIMIZER_DICT)
assert issubclass(module_class,
Optimizer), f"{module} does not implement Optimizer"
return module_class
class OptimizerFactory():
@classmethod
def from_args(cls, name: str, args: Dict[Text, Any]):
assert "parameters" in args, "parameters not in args."
assert "learning_rate" in args, "learning_rate not in args."
grad_clip = ClipGradByGlobalNormWithLog(
args['grad_clip']) if "grad_clip" in args else None
weight_decay = L2Decay(
args['weight_decay']) if "weight_decay" in args else None
if weight_decay:
logger.info(f'<WeightDecay - {weight_decay}>')
if grad_clip:
logger.info(f'<GradClip - {grad_clip}>')
module_class = dynamic_import_optimizer(name.lower())
args.update({"grad_clip": grad_clip, "weight_decay": weight_decay})
opt = instance_class(module_class, args)
if "__repr__" in vars(opt):
logger.info(f"{opt}")
else:
logger.info(
f"<Optimizer {module_class.__module__}.{module_class.__name__}> LR: {args["learning_rate"]}"
)
return opt
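# Hypothetical usage sketch; `model` and the hyper-parameter values below are
# assumptions, not part of this module:
#
#   >>> args = {
#   ...     "parameters": model.parameters(),
#   ...     "learning_rate": 1e-3,
#   ...     "weight_decay": 1e-6,
#   ...     "grad_clip": 5.0,
#   ... }
#   >>> optimizer = OptimizerFactory.from_args("adam", args)  # alias from OPTIMIZER_DICT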
| # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
from typing import Any
from typing import Dict
from typing import Text
import paddle
from paddle.optimizer import Optimizer
from paddle.regularizer import L2Decay
from paddlespeech.s2t.training.gradclip import ClipGradByGlobalNormWithLog
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.s2t.utils.dynamic_import import instance_class
from paddlespeech.s2t.utils.log import Log
__all__ = ["OptimizerFactory"]
logger = Log(__name__).getlog()
OPTIMIZER_DICT = {
"sgd": "paddle.optimizer:SGD",
"momentum": "paddle.optimizer:Momentum",
"adadelta": "paddle.optimizer:Adadelta",
"adam": "paddle.optimizer:Adam",
"adamw": "paddle.optimizer:AdamW",
}
def register_optimizer(cls):
"""Register optimizer."""
alias = cls.__name__.lower()
OPTIMIZER_DICT[alias] = cls.__module__ + ":" + cls.__name__
return cls
@register_optimizer
class Noam(paddle.optimizer.Adam):
"""Seem to: espnet/nets/pytorch_backend/transformer/optimizer.py """
def __init__(self,
learning_rate=0,
beta1=0.9,
beta2=0.98,
epsilon=1e-9,
parameters=None,
weight_decay=None,
grad_clip=None,
lazy_mode=False,
multi_precision=False,
name=None):
super().__init__(
learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
parameters=parameters,
weight_decay=weight_decay,
grad_clip=grad_clip,
lazy_mode=lazy_mode,
multi_precision=multi_precision,
name=name)
def __repr__(self):
echo = f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}> "
echo += f"learning_rate: {self._learning_rate}, "
echo += f"(beta1: {self._beta1} beta2: {self._beta2}), "
echo += f"epsilon: {self._epsilon}"
def dynamic_import_optimizer(module):
"""Import Optimizer class dynamically.
Args:
module (str): module_name:class_name or alias in `OPTIMIZER_DICT`
Returns:
type: Optimizer class
"""
module_class = dynamic_import(module, OPTIMIZER_DICT)
assert issubclass(module_class,
Optimizer), f"{module} does not implement Optimizer"
return module_class
class OptimizerFactory():
@classmethod
def from_args(cls, name: str, args: Dict[Text, Any]):
assert "parameters" in args, "parameters not in args."
assert "learning_rate" in args, "learning_rate not in args."
grad_clip = ClipGradByGlobalNormWithLog(
args['grad_clip']) if "grad_clip" in args else None
weight_decay = L2Decay(
args['weight_decay']) if "weight_decay" in args else None
if weight_decay:
logger.info(f'<WeightDecay - {weight_decay}>')
if grad_clip:
logger.info(f'<GradClip - {grad_clip}>')
module_class = dynamic_import_optimizer(name.lower())
args.update({"grad_clip": grad_clip, "weight_decay": weight_decay})
opt = instance_class(module_class, args)
if "__repr__" in vars(opt):
logger.info(f"{opt}")
else:
logger.info(
f"<Optimizer {module_class.__module__}.{module_class.__name__}> LR: {args['learning_rate']}"
)
return opt
|
"""JSON-rpc proxy model for BIG-Bench."""
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional, Any, Dict, Union, List
import urllib.parse
import uuid
import bigbench.api.model as model
import requests
import requests_unixsocket
import time
_RPC_VERSION = '2.0'
_INET_URL_PREFIX = 'http://'
_UNIX_URL_PREFIX = 'http+unix://'
_MAX_RETRIES = 8
_RETRY_DELAY_SEC = 10
class JsonRpcModel(model.Model):
"""A BIGBench api-compatible json-rpc interface.
Attributes:
model_name: name of model
url: json rpc url
"""
def __init__(
self,
model_name: str,
host: Optional[str] = None,
port: Optional[int] = None,
socket_path: Optional[str] = None,
):
"""Initializes instance.
Either host+port or socket_path must be specified.
Args:
model_name: name of model
host: hostname for inet connections
port: port number for inet connections
socket_path: path to socket for unix-domain socket connections
Raises:
ValueError: if neither host+port nor socket_path is specified
"""
if socket_path is None:
if host is None or port is None:
raise ValueError('either host+port or socket_path must be specified')
self.url = f'{_INET_URL_PREFIX}{host}:{port}/jsonrpc'
else:
abs_socket_path = os.path.abspath(os.path.expanduser(socket_path))
encoded_path = urllib.parse.quote_plus(abs_socket_path)
self.url = f'{_UNIX_URL_PREFIX}{encoded_path}/jsonrpc'
self.model_name = model_name
def _post(self, payload: Dict[str, Any]) -> Optional[Any]:
"""Posts request to server.
Args:
payload: dictionary containing rpc params
Returns:
result from rpc call
Raises:
RuntimeError: An error from the server.
HTTPError: An error communicating with server.
"""
for i in range(_MAX_RETRIES):
if self.url.startswith(_UNIX_URL_PREFIX):
with requests_unixsocket.monkeypatch():
response = requests.post(self.url, json=payload)
else:
response = requests.post(self.url, json=payload)
if response.status_code != requests.codes.ok:
#response.raise_for_status()
print(f'error from server: {response}')
time.sleep(_RETRY_DELAY_SEC)
continue
json_response = response.json()
if 'result' not in json_response:
#raise RuntimeError(f'response from server: {json_response}')
print(f'error: bad response from server: {json_response}')
time.sleep(_RETRY_DELAY_SEC)
continue
#print(f'response:\n{json_response['result']}')
return json_response['result']
raise RuntimeError('error: retry count exceeded')
def _payload(self, method: str, params: Dict[str, Any]) -> Dict[str, Any]:
"""Generates payload dictionary for rpc call.
Args:
method: rpc method name
params: parameter dictionary for rpc call
Returns:
json-serializable payload dictionary
"""
return {
'method': method,
'params': {**params, 'model_name': self.model_name},
'jsonrpc': _RPC_VERSION,
'id': uuid.uuid1().int,
}
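# For example, a `generate_text` call on a model named "my-model" produces a
# JSON-RPC 2.0 payload of roughly this shape (the id is a fresh uuid1 integer
# on every call; the values shown here are made up):
#
#   {'method': 'generate_text',
#    'params': {'inputs': 'hello', 'model_name': 'my-model'},
#    'jsonrpc': '2.0',
#    'id': 284792...}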
def generate_text(
self,
inputs: Union[str, List[str]],
max_length: Optional[int] = None,
stop_string: Optional[str] = None,
output_regex: Optional[str] = None,
) -> Union[str, List[str]]:
"""Generates and returns outputs from language model.
Args:
inputs: String or list of input strings.
max_length: Maximum output length, if None, limited only by model max
output length
stop_string: If specified, model output will be truncated to the shortest
string which includes stop_string.
output_regex: If specified, the first match to the python regular
expression output_regex in the model output will be returned. If there
is no match, an empty string will be returned.
Returns:
list of lists (of length=num_outputs) with generated responses
Raises:
RuntimeError: An error from the server.
HTTPError: An error communicating with the server.
"""
return self._post(
self._payload(
method='generate_text',
params={
'inputs': inputs,
'max_length': max_length,
'stop_string': stop_string,
'output_regex': output_regex,
}))
def cond_log_prob(
self,
inputs: Union[str, List[str]],
targets: Union[List[str], List[List[str]]],
absolute_normalization: Optional[bool] = False,
) -> Union[List[float], List[List[float]]]:
"""Computes conditional log probabilities of targets given inputs.
Args:
inputs: A single string input or a list of string inputs.
targets: Possible string outputs for each input. If input is a string,
this is a list `[t_1, t_2, ..., t_n]` of possible string outputs. If
input is a list of strings, then this is a nested list `[[t_1, t_2, ...,
t_n], ...]` with length equal to `len(inputs)`.
absolute_normalization: When True, the function returns the log
probability of unconstrained generation or the target sequence. When
False (default), log probabilities are normalized so that the
probabilities of generating `targets` sum to 1. Note that setting
`absolute_normalization` to True restricts the class of models that can
be evaluated to those that can assign absolute probabilities to
sequences.
Returns:
If a single string input is provided, returns a list of
log-probabilities `[lp_1, lp_2, ..., lp_n]` predicted by the model,
where `lp_i = log(prob(t_i | input))` is the conditional log-prob
to generate target `t_i` given input. If a list of string inputs
was provided, returns a list of such elements of the form
`[[lp_1, lp_2, ..., lp_n], ...]`, where each element contains the
log-probabilities for the corresponding input and targets.
In this case, the length of the returned list is `len(input)`.
If conditional probabilities are not supported by the model, the
model returns None.
Raises:
RuntimeError: An error from the server.
HTTPError: An error communicating with the server.
"""
return self._post(
self._payload(
method='cond_log_prob',
params={
'inputs': inputs,
'targets': targets,
'absolute_normalization': absolute_normalization,
}))
def shutdown_server(self) -> None:
"""Shuts down remote proxy server gracefully."""
self._post(self._payload(method='shutdown', params={}))
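# Hypothetical usage sketch; the host, port and model name are assumptions:
#
#   >>> m = JsonRpcModel(model_name="my-model", host="localhost", port=5000)
#   >>> m.generate_text("The capital of France is", max_length=8)
#   >>> m.cond_log_prob("2 + 2 =", targets=["4", "5"])
#   >>> m.shutdown_server()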
| """JSON-rpc proxy model for BIG-Bench."""
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional, Any, Dict, Union, List
import urllib.parse
import uuid
import bigbench.api.model as model
import requests
import requests_unixsocket
import time
_RPC_VERSION = '2.0'
_INET_URL_PREFIX = 'http://'
_UNIX_URL_PREFIX = 'http+unix://'
_MAX_RETRIES = 8
_RETRY_DELAY_SEC = 10
class JsonRpcModel(model.Model):
"""A BIGBench api-compatible json-rpc interface.
Attributes:
model_name: name of model
url: json rpc url
"""
def __init__(
self,
model_name: str,
host: Optional[str] = None,
port: Optional[int] = None,
socket_path: Optional[str] = None,
):
"""Initializes instance.
Either host+port or socket_path must be specified.
Args:
model_name: name of model
host: hostname for inet connections
port: port number for inet connections
socket_path: path to socket for unix-domain socket connections
Raises:
ValueError: if neither host+port nor socket_path is specified
"""
if socket_path is None:
if host is None or port is None:
raise ValueError('either host+port or socket_path must be specified')
self.url = f'{_INET_URL_PREFIX}{host}:{port}/jsonrpc'
else:
abs_socket_path = os.path.abspath(os.path.expanduser(socket_path))
encoded_path = urllib.parse.quote_plus(abs_socket_path)
self.url = f'{_UNIX_URL_PREFIX}{encoded_path}/jsonrpc'
self.model_name = model_name
def _post(self, payload: Dict[str, Any]) -> Optional[Any]:
"""Posts request to server.
Args:
payload: dictionary containing rpc params
Returns:
result from rpc call
Raises:
RuntimeError: An error from the server.
HTTPError: An error communicating with server.
"""
for i in range(_MAX_RETRIES):
if self.url.startswith(_UNIX_URL_PREFIX):
with requests_unixsocket.monkeypatch():
response = requests.post(self.url, json=payload)
else:
response = requests.post(self.url, json=payload)
if response.status_code != requests.codes.ok:
#response.raise_for_status()
print(f'error from server: {response}')
time.sleep(_RETRY_DELAY_SEC)
continue
json_response = response.json()
if 'result' not in json_response:
#raise RuntimeError(f'response from server: {json_response}')
print(f'error: bad response from server: {json_response}')
time.sleep(_RETRY_DELAY_SEC)
continue
#print(f'response:\n{json_response["result"]}')
return json_response['result']
raise RuntimeError('error: retry count exceeded')
def _payload(self, method: str, params: Dict[str, Any]) -> Dict[str, Any]:
"""Generates payload dictionary for rpc call.
Args:
method: rpc method name
params: parameter dictionary for rpc call
Returns:
json-serializable payload dictionary
"""
return {
'method': method,
'params': {**params, 'model_name': self.model_name},
'jsonrpc': _RPC_VERSION,
'id': uuid.uuid1().int,
}
def generate_text(
self,
inputs: Union[str, List[str]],
max_length: Optional[int] = None,
stop_string: Optional[str] = None,
output_regex: Optional[str] = None,
) -> Union[str, List[str]]:
"""Generates and returns outputs from language model.
Args:
inputs: String or list of input strings.
max_length: Maximum output length, if None, limited only by model max
output length
stop_string: If specified, model output will be truncated to the shortest
string which includes stop_string.
output_regex: If specified, the first match to the python regular
expression output_regex in the model output will be returned. If there
is no match, an empty string will be returned.
Returns:
list of lists (of length=num_outputs) with generated responses
Raises:
RuntimeError: An error from the server.
HTTPError: An error communicating with the server.
"""
return self._post(
self._payload(
method='generate_text',
params={
'inputs': inputs,
'max_length': max_length,
'stop_string': stop_string,
'output_regex': output_regex,
}))
def cond_log_prob(
self,
inputs: Union[str, List[str]],
targets: Union[List[str], List[List[str]]],
absolute_normalization: Optional[bool] = False,
) -> Union[List[float], List[List[float]]]:
"""Computes conditional log probabilities of targets given inputs.
Args:
inputs: A single string input or a list of string inputs.
targets: Possible string outputs for each input. If input is a string,
this is a list `[t_1, t_2, ..., t_n]` of possible string outputs. If
input is a list of strings, then this is a nested list `[[t_1, t_2, ...,
t_n], ...]` with length equal to `len(inputs)`.
absolute_normalization: When True, the function returns the log
probability of unconstrained generation or the target sequence. When
False (default), log probabilities are normalized so that the
probabilities of generating `targets` sum to 1. Note that setting
`absolute_normalization` to True restricts the class of models that can
be evaluated to those that can assign absolute probabilities to
sequences.
Returns:
If a single string input is provided, returns a list of
log-probabilities `[lp_1, lp_2, ..., lp_n]` predicted by the model,
where `lp_i = log(prob(t_i | input))` is the conditional log-prob
to generate target `t_i` given input. If a list of string inputs
was provided, returns a list of such elements of the form
`[[lp_1, lp_2, ..., lp_n], ...]`, where each element contains the
log-probabilities for the corresponding input and targets.
In this case, the length of the returned list is `len(input)`.
If conditional probabilities are not supported by the model, the
model returns None.
Raises:
RuntimeError: An error from the server.
HTTPError: An error communicating with the server.
"""
return self._post(
self._payload(
method='cond_log_prob',
params={
'inputs': inputs,
'targets': targets,
'absolute_normalization': absolute_normalization,
}))
def shutdown_server(self) -> None:
"""Shuts down remote proxy server gracefully."""
self._post(self._payload(method='shutdown', params={}))
|
import time
import fastapi
import uvicorn
import logging
import os
from process_doc import process_image
from models.document_model import DocumentModel
from pdf2jpg import pdf2jpg
from datetime import datetime
logs_folder_path = os.path.join(os.getcwd(), 'logs')
os.makedirs(logs_folder_path, exist_ok=True)
log_path = os.path.join(logs_folder_path, f'{datetime.date(datetime.now())}.log')
logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=log_path,
filemode='a',
datefmt='%d-%b-%y %H:%M:%S')
app = fastapi.FastAPI()
@app.get('/')
def index():
return {
'message': "Hello World!"
}
@app.get('/process_doc/{filename}', response_model=DocumentModel)
def process_doc(filename: str):
file_extension = filename.split('.')[-1]
if file_extension == "pdf":
return process_pdf(filename)
elif file_extension == "jpg" \
or file_extension == "jpeg" \
or file_extension == "png":
return process_img(filename)
def process_pdf(filename: str):
pages_paths = pdf2jpg(filename)
pages = []
for page_path in pages_paths:
pages.append(process_image(page_path))
result = {
"document_path": filename,
"temporal_files_dir": f"/data/converted/{filename.split(".")[-2]}",
"num_pages": len(pages),
"pages": pages
}
return DocumentModel(**result).dict()
def process_img(filename: str):
try:
filepath = '/data/' + filename
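# poll up to 10 times, one second apart, for the file to appear under /data;
# if it never shows up, process_image below will raise and be logged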
for i in range(10):
if os.path.exists(filepath):
break
time.sleep(1)
result = {
"document_path": filename,
"temporal_files_dir": "",
"num_pages": 1,
"pages": [process_image("/data/" + filename)]
}
return DocumentModel(**result).dict()
except Exception as e:
logging.error(f'Error occurred: {e}', exc_info=True)
return {
'message': 'Error occurred, see logs for details.'
}
if __name__ == '__main__':
uvicorn.run(app)
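# Usage sketch, assuming uvicorn's default port 8000 and a document already
# placed under /data (the filename is hypothetical):
#
#   curl http://127.0.0.1:8000/process_doc/scan.pdf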
| import time
import fastapi
import uvicorn
import logging
import os
from process_doc import process_image
from models.document_model import DocumentModel
from pdf2jpg import pdf2jpg
from datetime import datetime
logs_folder_path = os.path.join(os.getcwd(), 'logs')
os.makedirs(logs_folder_path, exist_ok=True)
log_path = os.path.join(logs_folder_path, f'{datetime.date(datetime.now())}.log')
logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=log_path,
filemode='a',
datefmt='%d-%b-%y %H:%M:%S')
app = fastapi.FastAPI()
@app.get('/')
def index():
return {
'message': "Hello World!"
}
@app.get('/process_doc/{filename}', response_model=DocumentModel)
def process_doc(filename: str):
file_extension = filename.split('.')[-1]
if file_extension == "pdf":
return process_pdf(filename)
elif file_extension == "jpg" \
or file_extension == "jpeg" \
or file_extension == "png":
return process_img(filename)
def process_pdf(filename: str):
pages_paths = pdf2jpg(filename)
pages = []
for page_path in pages_paths:
pages.append(process_image(page_path))
result = {
"document_path": filename,
"temporal_files_dir": f"/data/converted/{filename.split('.')[-2]}",
"num_pages": len(pages),
"pages": pages
}
return DocumentModel(**result).dict()
def process_img(filename: str):
try:
filepath = '/data/' + filename
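# poll up to 10 times, one second apart, for the file to appear under /data;
# if it never shows up, process_image below will raise and be logged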
for i in range(10):
if os.path.exists(filepath):
break
time.sleep(1)
result = {
"document_path": filename,
"temporal_files_dir": "",
"num_pages": 1,
"pages": [process_image("/data/" + filename)]
}
return DocumentModel(**result).dict()
except Exception as e:
logging.error(f'Error occurred: {e}', exc_info=True)
return {
'message': 'Error occurred, see logs for details.'
}
if __name__ == '__main__':
uvicorn.run(app)
|
import logging
from ..functions import erase_lines
from ..questions import SettingsQuestions
from ...fsm import State, Transition
from ...constants import NO
from ...constants import (YES, SUCCESS)
from ...core.settings import SettingsManager
logger = logging.getLogger('gryphon')
def back_to_previous(history, **kwargs):
history.pop()
erase_lines(**kwargs)
#####
def _condition_from_add_remote_registry_to_end(context: dict) -> bool:
return context["confirmation_option"] == YES
def _callback_from_add_remote_registry_to_end(context: dict) -> dict:
manager = SettingsManager()
manager.add_git_template_registry(
registry_name=context["registry_name"],
registry_repo=context["url"]
)
logger.log(SUCCESS, f'Successfully added registry {context['registry_name']}.')
context["history"] = []
print("\n")
return context
####
def _condition_from_add_remote_registry_to_ask_option(context: dict) -> bool:
return context["confirmation_option"] == NO
def _callback_from_add_remote_registry_to_ask_option(context: dict) -> dict:
# remove 2 entries from history
back_to_previous(context["history"], n_lines=2)
back_to_previous(context["history"], n_lines=2)
return context
class AddRemoteRegistry(State):
name = "add_remote_registry"
transitions = [
Transition(
next_state="ask_option",
condition=_condition_from_add_remote_registry_to_ask_option,
callback=_callback_from_add_remote_registry_to_ask_option
),
Transition(
next_state="ask_option",
condition=_condition_from_add_remote_registry_to_end,
callback=_callback_from_add_remote_registry_to_end
)
]
def on_start(self, context: dict) -> dict:
context["registry_name"] = SettingsQuestions.ask_registry_name()
context["url"] = SettingsQuestions.ask_git_url()
context["confirmation_option"] = SettingsQuestions.confirm_registry_addition(context["registry_name"])
return context
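# Flow sketch: on_start collects a registry name, a git URL and a confirmation,
# and the transitions above route the answer:
#
#   confirmation == YES -> register the git registry, clear history -> "ask_option"
#   confirmation == NO  -> erase the previous prompts from screen   -> "ask_option"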
| import logging
from ..functions import erase_lines
from ..questions import SettingsQuestions
from ...fsm import State, Transition
from ...constants import NO
from ...constants import (YES, SUCCESS)
from ...core.settings import SettingsManager
logger = logging.getLogger('gryphon')
def back_to_previous(history, **kwargs):
history.pop()
erase_lines(**kwargs)
#####
def _condition_from_add_remote_registry_to_end(context: dict) -> bool:
return context["confirmation_option"] == YES
def _callback_from_add_remote_registry_to_end(context: dict) -> dict:
manager = SettingsManager()
manager.add_git_template_registry(
registry_name=context["registry_name"],
registry_repo=context["url"]
)
logger.log(SUCCESS, f'Successfully added registry {context["registry_name"]}.')
context["history"] = []
print("\n")
return context
####
def _condition_from_add_remote_registry_to_ask_option(context: dict) -> bool:
return context["confirmation_option"] == NO
def _callback_from_add_remote_registry_to_ask_option(context: dict) -> dict:
# remove 2 entries from history
back_to_previous(context["history"], n_lines=2)
back_to_previous(context["history"], n_lines=2)
return context
class AddRemoteRegistry(State):
name = "add_remote_registry"
transitions = [
Transition(
next_state="ask_option",
condition=_condition_from_add_remote_registry_to_ask_option,
callback=_callback_from_add_remote_registry_to_ask_option
),
Transition(
next_state="ask_option",
condition=_condition_from_add_remote_registry_to_end,
callback=_callback_from_add_remote_registry_to_end
)
]
def on_start(self, context: dict) -> dict:
context["registry_name"] = SettingsQuestions.ask_registry_name()
context["url"] = SettingsQuestions.ask_git_url()
context["confirmation_option"] = SettingsQuestions.confirm_registry_addition(context["registry_name"])
return context
|
import warnings
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, date
from typing import Iterable, Optional, Set, Tuple, Union, List
import shapely
import shapely.ops
import structlog
from shapely.geometry import MultiPolygon
from shapely.geometry.base import BaseGeometry
from datacube.model import Dataset, Range
from datacube.utils.geometry import Geometry
_LOG = structlog.get_logger()
@dataclass
class TimePeriodOverview:
# These four elements make up a pseudo-id of the time period we've summarised.
#
# -> None means "all"
product_name: str
year: Optional[int]
month: Optional[int]
day: Optional[int]
dataset_count: int
timeline_dataset_counts: Counter
region_dataset_counts: Counter
timeline_period: str
time_range: Range
footprint_geometry: Union[shapely.geometry.MultiPolygon, shapely.geometry.Polygon]
footprint_crs: str
footprint_count: int
# The most newly created dataset
newest_dataset_creation_time: datetime
# List of CRSes that these datasets are in
crses: Set[str]
size_bytes: int
# What version of our product table this was based on (the last_refresh_time on ProductSummary)
product_refresh_time: datetime
# When this summary was generated. Set on the server.
summary_gen_time: Optional[datetime] = None
def __str__(self):
return (
f"{self.label} "
f"({self.dataset_count} dataset{"s" if self.dataset_count > 1 else ""})"
)
@property
def label(self):
return " ".join([(str(p) if p else "all") for p in self.period_tuple])
@property
def period_tuple(self):
"""
This is the pseudo-id of the product time period we've summarised.
Any of them can be None to represent 'all'
"""
return self.product_name, self.year, self.month, self.day
@period_tuple.setter
def period_tuple(self, v: Tuple[str, Optional[int], Optional[int], Optional[int]]):
self.product_name, self.year, self.month, self.day = v
def as_flat_period(self):
"""
How we "flatten" the time-slice for storage in DB columns. Must remain stable!
A "period type" enum, and a single date.
"""
return self.flat_period_representation(self.year, self.month, self.day)
@classmethod
def flat_period_representation(
cls, year: Optional[int], month: Optional[int], day: Optional[int]
):
period = "all"
if year:
period = "year"
if month:
period = "month"
if day:
period = "day"
return period, date(year or 1900, month or 1, day or 1)
@classmethod
def from_flat_period_representation(cls, period_type: str, start_day: date):
year = None
month = None
day = None
if period_type != "all":
year = start_day.year
if period_type != "year":
month = start_day.month
if period_type != "month":
day = start_day.day
return year, month, day
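# Example round-trip between the two representations: missing parts are padded
# with 1900/1/1 on the way in, and the absent levels come back as None:
#
#   >>> TimePeriodOverview.flat_period_representation(2021, 6, None)
#   ('month', datetime.date(2021, 6, 1))
#   >>> TimePeriodOverview.from_flat_period_representation('month', date(2021, 6, 1))
#   (2021, 6, None)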
@classmethod
def add_periods(
cls,
periods: Iterable["TimePeriodOverview"],
# This is in CRS units. Albers, so 1KM.
# Lower value will have a more accurate footprint and much larger page load times.
footprint_tolerance=1000.0,
):
periods = [p for p in periods if p is not None and p.dataset_count > 0]
period = "day"
crses = set(p.footprint_crs for p in periods)
if not crses:
footprint_crs = None
elif len(crses) == 1:
[footprint_crs] = crses
else:
# All generated summaries should be the same, so this can only occur if someone has
# changed the output CRS setting on an existing cubedash instance.
raise NotImplementedError("Time summaries use inconsistent CRSes.")
timeline_counter = Counter()
for p in periods:
timeline_counter.update(p.timeline_dataset_counts)
period = p.timeline_period
timeline_counter, period = cls._group_counter_if_needed(
timeline_counter, period
)
# The period elements that are the same across all of them.
# (it will be the period of the result)
common_time_period = list(periods[0].period_tuple) if periods else ([None] * 4)
region_counter = Counter()
for time_period in periods:
region_counter.update(time_period.region_dataset_counts)
# Attempt to fix broken geometries.
# -> The 'high_tide_comp_20p' tests give an example of this: geometry is valid when
# created, but after serialisation+deserialisation become invalid due to float
# rounding.
if (
time_period.footprint_geometry
and not time_period.footprint_geometry.is_valid
):
_LOG.info("invalid_stored_geometry", summary=time_period.period_tuple)
time_period.footprint_geometry = time_period.footprint_geometry.buffer(
0
)
# We're looking for the time period common to them all.
# Strike out any elements that differ between our periods.
this_period = time_period.period_tuple
for i, elem in enumerate(common_time_period):
if elem is not None and (elem != this_period[i]):
# All following should be blank too, since this is a hierarchy.
_erase_elements_from(common_time_period, i)
break
with_valid_geometries = [
p
for p in periods
if p.footprint_count
and p.footprint_geometry
and p.footprint_geometry.is_valid
and not p.footprint_geometry.is_empty
]
geometry_union = _create_unified_footprint(
with_valid_geometries, footprint_tolerance
)
total_datasets = sum(p.dataset_count for p in periods)
# Non-null properties here are the ones that are the same across all inputs.
product_name, year, month, day = common_time_period
return TimePeriodOverview(
product_name=product_name,
year=year,
month=month,
day=day,
dataset_count=total_datasets,
timeline_dataset_counts=timeline_counter,
timeline_period=period,
region_dataset_counts=region_counter,
time_range=Range(
min(r.time_range.begin for r in periods) if periods else None,
max(r.time_range.end for r in periods) if periods else None,
),
footprint_geometry=geometry_union,
footprint_crs=footprint_crs,
footprint_count=sum(p.footprint_count for p in with_valid_geometries),
newest_dataset_creation_time=max(
(
p.newest_dataset_creation_time
for p in periods
if p.newest_dataset_creation_time is not None
),
default=None,
),
crses=set.union(*(o.crses for o in periods)) if periods else set(),
# Why choose the max version? Because we assume older ones didn't need to be replaced,
# so the most recent refresh time is the version that we are current with.
product_refresh_time=max(
(
p.product_refresh_time
for p in periods
if p.product_refresh_time is not None
),
default=None,
),
summary_gen_time=min(
(p.summary_gen_time for p in periods if p.summary_gen_time is not None),
default=None,
),
size_bytes=sum(p.size_bytes for p in periods if p.size_bytes is not None),
)
@property
def footprint_wgs84(self) -> Optional[MultiPolygon]:
if not self.footprint_geometry:
return None
if not self.footprint_crs:
warnings.warn(f"Geometry without a crs for {self}")
return None
return (
Geometry(self.footprint_geometry, crs=self.footprint_crs)
.to_crs("EPSG:4326", wrapdateline=True)
.geom
)
@staticmethod
def _group_counter_if_needed(counter, period):
if len(counter) > 366:
if period == "day":
counter = Counter(
datetime(d.year, d.month, 1).date()
for d in counter.elements()
)
period = "month"
elif period == "month":
counter = Counter(
datetime(date.year, 1, 1).date() for date in counter.elements()
)
period = "year"
return counter, period
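# Illustrative (hypothetical dates): with period "day" and more than 366 distinct
# dates, Counter({date(2019, 1, 5): 3, date(2019, 1, 9): 2, ...}) would collapse to
# Counter({date(2019, 1, 1): 5, ...}) with period "month".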
@property
def footprint_srid(self):
if self.footprint_crs is None:
return None
epsg = self.footprint_crs.lower()
if not epsg.startswith("epsg:"):
_LOG.warn("unsupported.to_srid", crs=self.footprint_crs)
return None
return int(epsg.split(":")[1])
def _has_shape(datasets: Tuple[Dataset, Tuple[BaseGeometry, bool]]) -> bool:
dataset, (shape, was_valid) = datasets
return shape is not None
def _erase_elements_from(items: List, start_i: int):
"""
Erase elements from the given 'start_i' index onward, replacing them with None.
>>> _erase_elements_from([1, 2, 3], 0)
[None, None, None]
>>> _erase_elements_from([1, 2, 3], 1)
[1, None, None]
>>> _erase_elements_from([1, 2, 3], 2)
[1, 2, None]
>>> _erase_elements_from([1, 2, 3], 3)
[1, 2, 3]
"""
items[start_i:] = [None] * (len(items) - start_i)
# Return the list just for convenience in doctest. It's actually mutable.
return items
def _create_unified_footprint(
with_valid_geometries: List["TimePeriodOverview"], footprint_tolerance: float
):
"""
Union the given time periods' footprints, trying to fix any invalid geometries.
"""
if not with_valid_geometries:
return None
try:
geometry_union = shapely.ops.unary_union(
[p.footprint_geometry for p in with_valid_geometries]
)
except ValueError:
# Attempt 2 at union: Exaggerate the overlap *slightly* to
# avoid non-noded intersection.
# TODO: does shapely have a snap-to-grid?
try:
_LOG.warn("summary.footprint.invalid_union", exc_info=True)
geometry_union = shapely.ops.unary_union(
[p.footprint_geometry.buffer(0.001) for p in with_valid_geometries]
)
except ValueError:
_LOG.warn("summary.footprint.invalid_buffered_union", exc_info=True)
# Attempt 3 at union: Recursive filter bad polygons first
polygonlist = _polygon_chain(with_valid_geometries)
filtered_geom = _filter_geom(polygonlist)
geometry_union = shapely.ops.unary_union(filtered_geom)
if footprint_tolerance is not None:
geometry_union = geometry_union.simplify(footprint_tolerance)
return geometry_union
def _polygon_chain(valid_geometries: Iterable[TimePeriodOverview]) -> list:
"""Chain all the given [Mutli]Polygons into a single list."""
polygonlist = []
for poly in valid_geometries:
if type(poly.footprint_geometry) is MultiPolygon:
for p in list(poly.footprint_geometry):
polygonlist.append(p)
else:
polygonlist.append(poly.footprint_geometry)
return polygonlist
def _filter_geom(geomlist: List[BaseGeometry], start=0) -> List[BaseGeometry]:
"""
Recursive filtering of un-unionable polygons. Input list is modified in-place.
Exhaustively searches for a run of polygons that cause a union error
(eg. "non-noded intersection"), and cuts out the first one that it finds.
"""
# Pass through empty lists
if len(geomlist) == 0:
return geomlist
# Process non-empty lists
if start == len(geomlist):
geomlist.pop()
return geomlist
else:
for i in range(len(geomlist) - start):
try:
shapely.ops.unary_union(geomlist[0 : i + start])
except ValueError:
del geomlist[i + start]
start = start + i
break
if i == len(geomlist) - 1 - start:
return geomlist
_filter_geom(geomlist, start)
return geomlist
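# Illustrative: if unary_union raises ValueError because of, say, geomlist[2],
# _filter_geom(geomlist) deletes that polygon in-place and re-checks the
# remainder, so the returned list can be unioned cleanly.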
| import warnings
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, date
from typing import Iterable, Optional, Set, Tuple, Union, List
import shapely
import shapely.ops
import structlog
from shapely.geometry import MultiPolygon
from shapely.geometry.base import BaseGeometry
from datacube.model import Dataset, Range
from datacube.utils.geometry import Geometry
_LOG = structlog.get_logger()
@dataclass
class TimePeriodOverview:
# These four elements make up a pseudo-id of the time period we've summarised.
#
# -> None means "all"
product_name: str
year: Optional[int]
month: Optional[int]
day: Optional[int]
dataset_count: int
timeline_dataset_counts: Counter
region_dataset_counts: Counter
timeline_period: str
time_range: Range
footprint_geometry: Union[shapely.geometry.MultiPolygon, shapely.geometry.Polygon]
footprint_crs: str
footprint_count: int
# The most newly created dataset
newest_dataset_creation_time: datetime
# List of CRSes that these datasets are in
crses: Set[str]
size_bytes: int
# What version of our product table this was based on (the last_refresh_time on ProductSummary)
product_refresh_time: datetime
# When this summary was generated. Set on the server.
summary_gen_time: datetime = None
def __str__(self):
return (
f"{self.label} "
f"({self.dataset_count} dataset{'s' if self.dataset_count > 1 else ''})"
)
@property
def label(self):
return " ".join([(str(p) if p else "all") for p in self.period_tuple])
@property
def period_tuple(self):
"""
This is the pseudo-id of the product time period we've summarised.
Any of them can be None to represent 'all'
"""
return self.product_name, self.year, self.month, self.day
@period_tuple.setter
def period_tuple(self, v: Tuple[str, Optional[int], Optional[int], Optional[int]]):
self.product_name, self.year, self.month, self.day = v
def as_flat_period(self):
"""
How we "flatten" the time-slice for storage in DB columns. Must remain stable!
A "period type" enum, and a single date.
"""
return self.flat_period_representation(self.year, self.month, self.day)
@classmethod
def flat_period_representation(
cls, year: Optional[int], month: Optional[int], day: Optional[int]
):
period = "all"
if year:
period = "year"
if month:
period = "month"
if day:
period = "day"
return period, date(year or 1900, month or 1, day or 1)
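# Illustrative, given the logic above:
#   flat_period_representation(2019, 6, None) -> ("month", date(2019, 6, 1))
#   flat_period_representation(None, None, None) -> ("all", date(1900, 1, 1))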
@classmethod
def from_flat_period_representation(cls, period_type: str, start_day: date):
year = None
month = None
day = None
if period_type != "all":
year = start_day.year
if period_type != "year":
month = start_day.month
if period_type != "month":
day = start_day.day
return year, month, day
@classmethod
def add_periods(
cls,
periods: Iterable["TimePeriodOverview"],
# This is in CRS units. Albers, so 1KM.
# A lower value gives a more accurate footprint but much larger page load times.
footprint_tolerance=1000.0,
):
periods = [p for p in periods if p is not None and p.dataset_count > 0]
period = "day"
crses = set(p.footprint_crs for p in periods)
if not crses:
footprint_crs = None
elif len(crses) == 1:
[footprint_crs] = crses
else:
# All generated summaries should use the same CRS, so this can only occur if
# someone changes the output CRS setting on an existing cubedash instance.
raise NotImplementedError("Time summaries use inconsistent CRSes.")
timeline_counter = Counter()
for p in periods:
timeline_counter.update(p.timeline_dataset_counts)
period = p.timeline_period
timeline_counter, period = cls._group_counter_if_needed(
timeline_counter, period
)
# The period elements that are the same across all of them.
# (it will be the period of the result)
common_time_period = list(periods[0].period_tuple) if periods else ([None] * 4)
region_counter = Counter()
for time_period in periods:
region_counter.update(time_period.region_dataset_counts)
# Attempt to fix broken geometries.
# -> The 'high_tide_comp_20p' tests give an example of this: geometry is valid when
# created, but after serialisation+deserialisation become invalid due to float
# rounding.
if (
time_period.footprint_geometry
and not time_period.footprint_geometry.is_valid
):
_LOG.info("invalid_stored_geometry", summary=time_period.period_tuple)
time_period.footprint_geometry = time_period.footprint_geometry.buffer(
0
)
# We're looking for the time period common to them all.
# Strike out any elements that differ between our periods.
this_period = time_period.period_tuple
for i, elem in enumerate(common_time_period):
if elem is not None and (elem != this_period[i]):
# All following should be blank too, since this is a hierarchy.
_erase_elements_from(common_time_period, i)
break
with_valid_geometries = [
p
for p in periods
if p.footprint_count
and p.footprint_geometry
and p.footprint_geometry.is_valid
and not p.footprint_geometry.is_empty
]
geometry_union = _create_unified_footprint(
with_valid_geometries, footprint_tolerance
)
total_datasets = sum(p.dataset_count for p in periods)
# Non-null properties here are the ones that are the same across all inputs.
product_name, year, month, day = common_time_period
return TimePeriodOverview(
product_name=product_name,
year=year,
month=month,
day=day,
dataset_count=total_datasets,
timeline_dataset_counts=timeline_counter,
timeline_period=period,
region_dataset_counts=region_counter,
time_range=Range(
min(r.time_range.begin for r in periods) if periods else None,
max(r.time_range.end for r in periods) if periods else None,
),
footprint_geometry=geometry_union,
footprint_crs=footprint_crs,
footprint_count=sum(p.footprint_count for p in with_valid_geometries),
newest_dataset_creation_time=max(
(
p.newest_dataset_creation_time
for p in periods
if p.newest_dataset_creation_time is not None
),
default=None,
),
crses=set.union(*(o.crses for o in periods)) if periods else set(),
# Why choose the max version? Because we assume older ones didn't need to be replaced,
# so the most recent refresh time is the version that we are current with.
product_refresh_time=max(
(
p.product_refresh_time
for p in periods
if p.product_refresh_time is not None
),
default=None,
),
summary_gen_time=min(
(p.summary_gen_time for p in periods if p.summary_gen_time is not None),
default=None,
),
size_bytes=sum(p.size_bytes for p in periods if p.size_bytes is not None),
)
@property
def footprint_wgs84(self) -> Optional[MultiPolygon]:
if not self.footprint_geometry:
return None
if not self.footprint_crs:
warnings.warn(f"Geometry without a crs for {self}")
return None
return (
Geometry(self.footprint_geometry, crs=self.footprint_crs)
.to_crs("EPSG:4326", wrapdateline=True)
.geom
)
@staticmethod
def _group_counter_if_needed(counter, period):
if len(counter) > 366:
if period == "day":
counter = Counter(
datetime(date.year, date.month, 1).date()
for date in counter.elements()
)
period = "month"
elif period == "month":
counter = Counter(
datetime(date.year, 1, 1).date() for date in counter.elements()
)
period = "year"
return counter, period
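# Illustrative (hypothetical dates): with period "day" and more than 366 distinct
# dates, Counter({date(2019, 1, 5): 3, date(2019, 1, 9): 2, ...}) would collapse to
# Counter({date(2019, 1, 1): 5, ...}) with period "month".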
@property
def footprint_srid(self):
if self.footprint_crs is None:
return None
epsg = self.footprint_crs.lower()
if not epsg.startswith("epsg:"):
_LOG.warn("unsupported.to_srid", crs=self.footprint_crs)
return None
return int(epsg.split(":")[1])
def _has_shape(datasets: Tuple[Dataset, Tuple[BaseGeometry, bool]]) -> bool:
dataset, (shape, was_valid) = datasets
return shape is not None
def _erase_elements_from(items: List, start_i: int):
"""
Erase elements from the given 'start_i' index onward, replacing them with None.
>>> _erase_elements_from([1, 2, 3], 0)
[None, None, None]
>>> _erase_elements_from([1, 2, 3], 1)
[1, None, None]
>>> _erase_elements_from([1, 2, 3], 2)
[1, 2, None]
>>> _erase_elements_from([1, 2, 3], 3)
[1, 2, 3]
"""
items[start_i:] = [None] * (len(items) - start_i)
# Return the list just for convenience in doctest. It's actually mutable.
return items
def _create_unified_footprint(
with_valid_geometries: List["TimePeriodOverview"], footprint_tolerance: float
):
"""
Union the given time periods' footprints, trying to fix any invalid geometries.
"""
if not with_valid_geometries:
return None
try:
geometry_union = shapely.ops.unary_union(
[p.footprint_geometry for p in with_valid_geometries]
)
except ValueError:
# Attempt 2 at union: Exaggerate the overlap *slightly* to
# avoid non-noded intersection.
# TODO: does shapely have a snap-to-grid?
try:
_LOG.warn("summary.footprint.invalid_union", exc_info=True)
geometry_union = shapely.ops.unary_union(
[p.footprint_geometry.buffer(0.001) for p in with_valid_geometries]
)
except ValueError:
_LOG.warn("summary.footprint.invalid_buffered_union", exc_info=True)
# Attempt 3 at union: Recursive filter bad polygons first
polygonlist = _polygon_chain(with_valid_geometries)
filtered_geom = _filter_geom(polygonlist)
geometry_union = shapely.ops.unary_union(filtered_geom)
if footprint_tolerance is not None:
geometry_union = geometry_union.simplify(footprint_tolerance)
return geometry_union
def _polygon_chain(valid_geometries: Iterable[TimePeriodOverview]) -> list:
"""Chain all the given [Mutli]Polygons into a single list."""
polygonlist = []
for poly in valid_geometries:
if type(poly.footprint_geometry) is MultiPolygon:
for p in list(poly.footprint_geometry):
polygonlist.append(p)
else:
polygonlist.append(poly.footprint_geometry)
return polygonlist
def _filter_geom(geomlist: List[BaseGeometry], start=0) -> List[BaseGeometry]:
"""
Recursive filtering of un-unionable polygons. Input list is modified in-place.
Exhaustively searches for a run of polygons that cause a union error
(eg. "non-noded intersection"), and cuts out the first one that it finds.
"""
# Pass through empty lists
if len(geomlist) == 0:
return geomlist
# Process non-empty lists
if start == len(geomlist):
geomlist.pop()
return geomlist
else:
for i in range(len(geomlist) - start):
try:
shapely.ops.unary_union(geomlist[0 : i + start])
except ValueError:
del geomlist[i + start]
start = start + i
break
if i == len(geomlist) - 1 - start:
return geomlist
_filter_geom(geomlist, start)
return geomlist
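# Illustrative: if unary_union raises ValueError because of, say, geomlist[2],
# _filter_geom(geomlist) deletes that polygon in-place and re-checks the
# remainder, so the returned list can be unioned cleanly.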
|
from types import GeneratorType
from typing import List, Mapping, Union
__all__ = [
'clean_picard_style_value',
'snakecase_to_kebab_case',
'clean_picard_style_key',
'format_bedtools_params',
'format_bwa_params',
'format_dwgsim_params',
'format_fgbio_params',
'format_kraken_params',
'format_picard_params',
]
def clean_picard_style_value(value: Union[List[str], str]) -> Union[List[str], str]:
"""Clean a dictionary of Picard key-value pairs."""
if isinstance(value, (list, tuple, GeneratorType)):
return list(map(clean_picard_style_value, value)) # type: ignore
elif value is None:
return 'null'
elif value is True:
return 'true'
elif value is False:
return 'false'
else:
return value
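# Illustrative: clean_picard_style_value(None) -> 'null';
# clean_picard_style_value([True, False, 'x']) -> ['true', 'false', 'x'].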
def format_bed_key(key: str) -> str:
"""Clean a bedtools parameter key."""
return '-' + key.replace('_', '')
def snakecase_to_kebab_case(key: str) -> str:
"""Convert snake_case to kebab-case."""
return f'--{key.lower().replace('_', '-')}'
def clean_picard_style_key(key: str) -> str:
"""Clean a Picard parameter key."""
return key.upper()
def format_bedtools_params(params: Mapping) -> str:
"""Clean a dictionary of bedtools key-value pairs."""
formatted_params = ''
for key, value in params.items():
if key == 'extra':
continue
key = format_bed_key(key)
if value is True:
formatted_params += f' {key}'
elif value is False:
continue
else:
formatted_params += f' {key} {value}'
return formatted_params
def format_bwa_params(params: Mapping) -> str:
"""Clean a dictionary of bwa key-value pairs."""
formatted_params = ''
for key, value in params.items():
if key == 'extra':
continue
elif value is True:
formatted_params += f' -{key}'
elif value is False:
continue
else:
formatted_params += f' -{key} {value}'
return formatted_params
def format_dwgsim_params(params: Mapping) -> str:
"""Clean a dictionary of dwgsim key-value pairs."""
formatted_params = ''
for key, value in params.items():
if key in ('extra', 'output_prefix'):
continue
key = '1' if key == 'r1' else key
key = '2' if key == 'r2' else key
if value is True:
formatted_params += f' -{key}'
elif value is False:
continue
else:
formatted_params += f' -{key} {value}'
return formatted_params
def format_fgbio_params(params: Mapping) -> str:
"""Clean a dictionary of fgbio key-value pairs."""
formatted_params = ''
for key, value in params.items():
# Check for 'extra' before kebab-casing the key, otherwise it never matches.
if key == 'extra':
continue
key = snakecase_to_kebab_case(key)
value = clean_picard_style_value(value)
# snakecase_to_kebab_case already prepends '--', so use the key as-is.
if isinstance(value, list):
formatted_params += ''.join(f' {key}={v}' for v in value)
else:
formatted_params += f' {key}={value}'
return formatted_params
def format_kraken_params(params: Mapping) -> str:
"""Clean a dictionary of kraken key-value pairs."""
formatted_params = ''
for key, value in params.items():
# Check for 'extra' before kebab-casing the key, otherwise it never matches.
if key == 'extra':
continue
key = snakecase_to_kebab_case(key)
# snakecase_to_kebab_case already prepends '--', so use the key as-is.
if value is True:
formatted_params += f' {key}'
elif value is False:
continue
else:
formatted_params += f' {key} {value}'
return formatted_params
def format_picard_params(params: Mapping) -> str:
"""Clean a dictionary of picard key-value pairs."""
formatted_params = ''
for key, value in params.items():
# Check for 'extra' before upper-casing the key, otherwise it never matches.
if key == 'extra':
continue
key = clean_picard_style_key(key)
value = clean_picard_style_value(value)
if isinstance(value, list):
formatted_params += ''.join(f' {key}={v}' for v in value)
else:
formatted_params += f' {key}={value}'
return formatted_params
| from types import GeneratorType
from typing import List, Mapping, Union
__all__ = [
'clean_picard_style_value',
'snakecase_to_kebab_case',
'clean_picard_style_key',
'format_bedtools_params',
'format_bwa_params',
'format_dwgsim_params',
'format_fgbio_params',
'format_kraken_params',
'format_picard_params',
]
def clean_picard_style_value(value: Union[List[str], str]) -> Union[List[str], str]:
"""Clean a dictionary of Picard key-value pairs."""
if isinstance(value, (list, tuple, GeneratorType)):
return list(map(clean_picard_style_value, value)) # type: ignore
elif value is None:
return 'null'
elif value is True:
return 'true'
elif value is False:
return 'false'
else:
return value
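# Illustrative: clean_picard_style_value(None) -> 'null';
# clean_picard_style_value([True, False, 'x']) -> ['true', 'false', 'x'].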
def format_bed_key(key: str) -> str:
"""Clean a bedtools parameter key."""
return '-' + key.replace('_', '')
def snakecase_to_kebab_case(key: str) -> str:
"""Convert snake_case to kebab-case."""
return f'--{key.lower().replace("_", "-")}'
def clean_picard_style_key(key: str) -> str:
"""Clean a Picard parameter key."""
return key.upper()
def format_bedtools_params(params: Mapping) -> str:
"""Clean a dictionary of bedtools key-value pairs."""
formatted_params = ''
for key, value in params.items():
if key == 'extra':
continue
key = format_bed_key(key)
if value is True:
formatted_params += f' {key}'
elif value is False:
continue
else:
formatted_params += f' {key} {value}'
return formatted_params
def format_bwa_params(params: Mapping) -> str:
"""Clean a dictionary of bwa key-value pairs."""
formatted_params = ''
for key, value in params.items():
if key == 'extra':
continue
elif value is True:
formatted_params += f' -{key}'
elif value is False:
continue
else:
formatted_params += f' -{key} {value}'
return formatted_params
def format_dwgsim_params(params: Mapping) -> str:
"""Clean a dictionary of dwgsim key-value pairs."""
formatted_params = ''
for key, value in params.items():
if key in ('extra', 'output_prefix'):
continue
key = '1' if key == 'r1' else key
key = '2' if key == 'r2' else key
if value is True:
formatted_params += f' -{key}'
elif value is False:
continue
else:
formatted_params += f' -{key} {value}'
return formatted_params
def format_fgbio_params(params: Mapping) -> str:
"""Clean a dictionary of fgbio key-value pairs."""
formatted_params = ''
for key, value in params.items():
# Check for 'extra' before kebab-casing the key, otherwise it never matches.
if key == 'extra':
continue
key = snakecase_to_kebab_case(key)
value = clean_picard_style_value(value)
# snakecase_to_kebab_case already prepends '--', so use the key as-is.
if isinstance(value, list):
formatted_params += ''.join(f' {key}={v}' for v in value)
else:
formatted_params += f' {key}={value}'
return formatted_params
def format_kraken_params(params: Mapping) -> str:
"""Clean a dictionary of kraken key-value pairs."""
formatted_params = ''
for key, value in params.items():
# Check for 'extra' before kebab-casing the key, otherwise it never matches.
if key == 'extra':
continue
key = snakecase_to_kebab_case(key)
# snakecase_to_kebab_case already prepends '--', so use the key as-is.
if value is True:
formatted_params += f' {key}'
elif value is False:
continue
else:
formatted_params += f' {key} {value}'
return formatted_params
def format_picard_params(params: Mapping) -> str:
"""Clean a dictionary of picard key-value pairs."""
formatted_params = ''
for key, value in params.items():
# Check for 'extra' before upper-casing the key, otherwise it never matches.
if key == 'extra':
continue
key = clean_picard_style_key(key)
value = clean_picard_style_value(value)
if isinstance(value, list):
formatted_params += ''.join(f' {key}={v}' for v in value)
else:
formatted_params += f' {key}={value}'
return formatted_params
|
from ortools.linear_solver import pywraplp
import pandas as pd
import numpy as np
def create_cost_matrix(distances, pref_big_school, pref_rural):
cost_matrix = distances + 10 * pref_big_school + 10 * pref_rural
return cost_matrix
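# Illustrative (hypothetical numbers): a teacher 42 km from a school whose
# size preference is unsatisfied (1) and whose rural preference is satisfied (0)
# has cost 42 + 10 * 1 + 10 * 0 = 52 for that school.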
def find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools):
# Create cost matrix
cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
# Create the mip solver with the SCIP backend.
solver = pywraplp.Solver.CreateSolver('SCIP')
# x[t,s] is an array of 0-1 variables, which will be 1 if teacher t is assigned to school s.
x = {}
for t in range(number_teachers):
for s in range(number_schools):
x[t, s] = solver.IntVar(0, 1, '')
# Constraint 1: Each teacher is assigned to one school.
for t in range(number_teachers):
solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
# Constraint 2: Each school is assigned at least its minimum number of teachers.
for s in range(number_schools):
solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
# Constraint 3: Each school is assigned at most its minimum number of teachers plus 20.
for s in range(number_schools):
solver.Add(
solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
# Constraint 4: Each teacher has a maximum cost of 100.
for t in range(number_teachers):
solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
# Objective
objective_terms = []
for t in range(number_teachers):
for s in range(number_schools):
objective_terms.append(cost_matrix[t][s] * x[t, s])
solver.Minimize(solver.Sum(objective_terms))
# Solve
status = solver.Solve()
df = pd.DataFrame(columns=['iteration', 'teacher', 'school', 'cost', 'dist'])
# Save costs for further iterations
costs_per_teacher = []
# Print solution.
if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
print(f'Total cost = {solver.Objective().Value()}\n')
for t in range(number_teachers):
for s in range(number_schools):
# Test if x[t,s] is 1 (with tolerance for floating point arithmetic).
if x[t, s].solution_value() > 0.5:
print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
df = df.append({'iteration': 1, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
'dist': distances[t][s],
'pref_school_size_unsatisfied': pref_big_school[t][s],
'pref_urban_rural_unsatisfied': pref_rural[t][s]},
ignore_index=True)
costs_per_teacher.append(cost_matrix[t][s])
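# Scale each teacher's whole cost row by their realised iteration-1 cost
# (divided by 10), presumably so that teachers who fared badly in the first
# assignment weigh more heavily in the second iteration's objective.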
adapted_costs = cost_matrix * np.array(costs_per_teacher)[:, np.newaxis] / 10
return df, adapted_costs
def find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools,
adapted_cost_matrix):
# Create cost matrix
cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
# Create the mip solver with the SCIP backend.
solver = pywraplp.Solver.CreateSolver('SCIP')
# x[t,s] is an array of 0-1 variables, which will be 1 if teacher t is assigned to school s.
x = {}
for t in range(number_teachers):
for s in range(number_schools):
x[t, s] = solver.IntVar(0, 1, '')
# Constraint 1: Each teacher is assigned to one school.
for t in range(number_teachers):
solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
# Constraint 2: Each school is assigned at least its minimum number of teachers.
for s in range(number_schools):
solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
# Constraint 3: Each school is assigned at most its minimum number of teachers plus 20.
for s in range(number_schools):
solver.Add(
solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
# Constraint 4: Each teacher has a maximum cost of 100.
for t in range(number_teachers):
solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
# Objective
objective_terms = []
for t in range(number_teachers):
for s in range(number_schools):
objective_terms.append(adapted_cost_matrix[t][s] * x[t, s])
solver.Minimize(solver.Sum(objective_terms))
# Solve
status = solver.Solve()
df = pd.DataFrame(columns=['iteration', 'teacher', 'school', 'cost', 'dist'])
# Print solution.
if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
print(f'Total cost = {solver.Objective().Value()}\n')
for t in range(number_teachers):
for s in range(number_schools):
# Test if x[t,s] is 1 (with tolerance for floating point arithmetic).
if x[t, s].solution_value() > 0.5:
print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
df = df.append({'iteration': 2, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
'dist': distances[t][s],
'pref_school_size_unsatisfied': pref_big_school[t][s],
'pref_urban_rural_unsatisfied': pref_rural[t][s]},
ignore_index=True)
return df
if __name__ == '__main__':
nb_of_teachers = 761
nb_of_schools = 58
# Get school data
df_schools = pd.read_csv('../data/school_dataset.csv')
# Get cost matrix
distances = pd.read_pickle('../data/geopy_distance_matrix_Waldorfschule.pkl')
# distances = np.random.rand(nb_of_teachers, nb_of_schools) * 200
pref_big_school = pd.read_pickle(r'../data/preference_big_school_Waldorfschule.pkl')
pref_rural = pd.read_pickle(r'../data/preference_rural_Waldorfschule.pkl')
df, adapted_costs = find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural,
number_teachers=nb_of_teachers, number_schools=nb_of_schools)
print(df)
print(df.groupby(['school']).count()['teacher'])
print(f'Average cost: {df['cost'].mean()}.')
print(f'Teacher {df['cost'].argmin()} has the minimum cost ({df['cost'].min()}).')
print(f'Teacher {df['cost'].argmax()} has the maximum cost ({df['cost'].max()}).')
print(adapted_costs)
df2 = find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers=nb_of_teachers,
number_schools=nb_of_schools, adapted_cost_matrix=adapted_costs)
print(df2)
print(df2.groupby(['school']).count()['teacher'])
print(f'Average cost: {df2['cost'].mean()}.')
print(f'Teacher {df2['cost'].argmin()} has the minimum cost ({df2['cost'].min()}).')
print(f'Teacher {df2['cost'].argmax()} has the maximum cost ({df2['cost'].max()}).')
df_all = df.append(df2)
df_all.to_csv('../data/results.csv', index=False)
| from ortools.linear_solver import pywraplp
import pandas as pd
import numpy as np
def create_cost_matrix(distances, pref_big_school, pref_rural):
cost_matrix = distances + 10 * pref_big_school + 10 * pref_rural
return cost_matrix
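# Illustrative (hypothetical numbers): a teacher 42 km from a school whose
# size preference is unsatisfied (1) and whose rural preference is satisfied (0)
# has cost 42 + 10 * 1 + 10 * 0 = 52 for that school.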
def find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools):
# Create cost matrix
cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
# Create the mip solver with the SCIP backend.
solver = pywraplp.Solver.CreateSolver('SCIP')
# x[t,s] is an array of 0-1 variables, which will be 1 if teacher t is assigned to school s.
x = {}
for t in range(number_teachers):
for s in range(number_schools):
x[t, s] = solver.IntVar(0, 1, '')
# Constraint 1: Each teacher is assigned to one school.
for t in range(number_teachers):
solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
# Constraint 2: Each school is assigned at least its minimum number of teachers.
for s in range(number_schools):
solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
# Constraint 3: Each school is assigned at most its minimum number of teachers plus 20.
for s in range(number_schools):
solver.Add(
solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
# Constraint 4: Each teacher has a maximum cost of 100.
for t in range(number_teachers):
solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
# Objective
objective_terms = []
for t in range(number_teachers):
for s in range(number_schools):
objective_terms.append(cost_matrix[t][s] * x[t, s])
solver.Minimize(solver.Sum(objective_terms))
# Solve
status = solver.Solve()
df = pd.DataFrame(columns=['iteration', 'teacher', 'school', 'cost', 'dist'])
# Save costs for further iterations
costs_per_teacher = []
# Print solution.
if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
print(f'Total cost = {solver.Objective().Value()}\n')
for t in range(number_teachers):
for s in range(number_schools):
# Test if x[t,s] is 1 (with tolerance for floating point arithmetic).
if x[t, s].solution_value() > 0.5:
print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
df = df.append({'iteration': 1, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
'dist': distances[t][s],
'pref_school_size_unsatisfied': pref_big_school[t][s],
'pref_urban_rural_unsatisfied': pref_rural[t][s]},
ignore_index=True)
costs_per_teacher.append(cost_matrix[t][s])
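# Scale each teacher's whole cost row by their realised iteration-1 cost
# (divided by 10), presumably so that teachers who fared badly in the first
# assignment weigh more heavily in the second iteration's objective.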
adapted_costs = cost_matrix * np.array(costs_per_teacher)[:, np.newaxis] / 10
return df, adapted_costs
def find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools,
adapted_cost_matrix):
# Create cost matrix
cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
# Create the mip solver with the SCIP backend.
solver = pywraplp.Solver.CreateSolver('SCIP')
# x[t,s] is an array of 0-1 variables, which will be 1 if teacher t is assigned to school s.
x = {}
for t in range(number_teachers):
for s in range(number_schools):
x[t, s] = solver.IntVar(0, 1, '')
# Constraint 1: Each teacher is assigned to one school.
for t in range(number_teachers):
solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
# Constraint 2: Each school is assigned at least its minimum number of teachers.
for s in range(number_schools):
solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
# Constraint 3: Each school is assigned at most its minimum number of teachers plus 20.
for s in range(number_schools):
solver.Add(
solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
# Constraint 4: Each teacher has a maximum cost of 100.
for t in range(number_teachers):
solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
# Objective
objective_terms = []
for t in range(number_teachers):
for s in range(number_schools):
objective_terms.append(adapted_cost_matrix[t][s] * x[t, s])
solver.Minimize(solver.Sum(objective_terms))
# Solve
status = solver.Solve()
df = pd.DataFrame(columns=['iteration', 'teacher', 'school', 'cost', 'dist'])
# Print solution.
if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
print(f'Total cost = {solver.Objective().Value()}\n')
for t in range(number_teachers):
for s in range(number_schools):
# Test if x[t,s] is 1 (with tolerance for floating point arithmetic).
if x[t, s].solution_value() > 0.5:
print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
df = df.append({'iteration': 2, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
'dist': distances[t][s],
'pref_school_size_unsatisfied': pref_big_school[t][s],
'pref_urban_rural_unsatisfied': pref_rural[t][s]},
ignore_index=True)
return df
if __name__ == '__main__':
nb_of_teachers = 761
nb_of_schools = 58
# Get school data
df_schools = pd.read_csv('../data/school_dataset.csv')
# Get cost matrix
distances = pd.read_pickle('../data/geopy_distance_matrix_Waldorfschule.pkl')
# distances = np.random.rand(nb_of_teachers, nb_of_schools) * 200
pref_big_school = pd.read_pickle(r'../data/preference_big_school_Waldorfschule.pkl')
pref_rural = pd.read_pickle(r'../data/preference_rural_Waldorfschule.pkl')
df, adapted_costs = find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural,
number_teachers=nb_of_teachers, number_schools=nb_of_schools)
print(df)
print(df.groupby(['school']).count()['teacher'])
print(f'Average cost: {df["cost"].mean()}.')
print(f'Teacher {df["cost"].argmin()} has the minimum cost ({df["cost"].min()}).')
print(f'Teacher {df["cost"].argmax()} has the maximum cost ({df["cost"].max()}).')
print(adapted_costs)
df2 = find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers=nb_of_teachers,
number_schools=nb_of_schools, adapted_cost_matrix=adapted_costs)
print(df2)
print(df2.groupby(['school']).count()['teacher'])
print(f'Average cost: {df2["cost"].mean()}.')
print(f'Teacher {df2["cost"].argmin()} has the minimum cost ({df2["cost"].min()}).')
print(f'Teacher {df2["cost"].argmax()} has the maximum cost ({df2["cost"].max()}).')
df_all = df.append(df2)
df_all.to_csv('../data/results.csv', index=False)
|
import logging
import psycopg2
from datetime import datetime, timezone
from django.conf import settings
from django.core.management import call_command
from pathlib import Path
from typing import Tuple
from usaspending_api.broker.helpers.last_load_date import get_last_load_date, update_last_load_date
from usaspending_api.common.etl import ETLDBLinkTable, ETLTable, operations
from usaspending_api.common.helpers.date_helper import datetime_command_line_argument_type
from usaspending_api.common.helpers.sql_helpers import get_broker_dsn_string
from usaspending_api.common.helpers.timing_helpers import ScriptTimer as Timer
from usaspending_api.common.retrieve_file_from_uri import SCHEMA_HELP_TEXT
from usaspending_api.transactions.loader_functions import filepath_command_line_argument_type
from usaspending_api.transactions.loader_functions import read_file_for_database_ids
from usaspending_api.transactions.loader_functions import store_ids_in_file
logger = logging.getLogger("script")
class AgnosticTransactionLoader:
beginning_of_time = "1970-01-01"
chunk_size = 25000
is_incremental = False
successful_run = False
upsert_records = 0
def add_arguments(self, parser):
mutually_exclusive_group = parser.add_mutually_exclusive_group(required=True)
mutually_exclusive_group.add_argument(
"--ids", nargs="+", help=f"Load/Reload transactions using this {self.shared_pk} list (space-separated)",
)
mutually_exclusive_group.add_argument(
"--date",
dest="datetime",
type=datetime_command_line_argument_type(naive=True), # Broker date/times are naive.
help="Load/Reload records from the provided datetime to the script execution start time.",
)
mutually_exclusive_group.add_argument(
"--since-last-load",
dest="incremental_date",
action="store_true",
help="Equivalent to loading from date, but date is drawn from last update date recorded in DB.",
)
mutually_exclusive_group.add_argument(
"--file",
dest="file",
type=filepath_command_line_argument_type(chunk_count=self.chunk_size),
help=(
f"Load/Reload transactions using {self.shared_pk} values stored at this file path"
f" (one ID per line) {SCHEMA_HELP_TEXT}"
),
)
mutually_exclusive_group.add_argument(
"--reload-all",
action="store_true",
help=(
f"Script will load or reload all {self.broker_source_table_name} records from broker database,"
" from all time. This does NOT clear the USASpending database first."
),
)
parser.add_argument(
"--process-deletes",
action="store_true",
help=(
"If not in local mode, process deletes before beginning the upsert operations."
" This shouldn't be used with --file or --ids parameters"
),
)
def handle(self, *args, **options):
with Timer(message="Script"):
self.run_script(*args, **options)
def run_script(self, *args, **options):
self.start_time = datetime.now(timezone.utc)
self.options = options
if self.options["incremental_date"]:
self.is_incremental = True
self.options["datetime"] = self.obtain_last_date()
if self.options["process_deletes"]:
delete_date = self.options["datetime"]
if not delete_date:
delete_date = self.beginning_of_time
with Timer(message="Processing deletes"):
delete_job_status = call_command(self.delete_management_command, f"--date={delete_date}")
if delete_job_status != 0:
raise RuntimeError("Fatal error. Problem with the deletes")
try:
with Timer(message="Load Process"):
self.process()
self.successful_run = True
except (Exception, SystemExit, KeyboardInterrupt):
logger.exception("Fatal error")
finally:
self.cleanup()
def obtain_last_date(self):
dt = get_last_load_date(self.last_load_record, self.lookback_minutes)
if not dt:
raise SystemExit("No datetime stored in the database, unable to use --since-last-load")
return dt
def process(self) -> None:
with Timer(message="Compiling IDs to process"):
self.file_path, self.total_ids_to_process = self.compile_transactions_to_process()
logger.info(f"{self.total_ids_to_process:,} IDs stored")
with Timer(message="Transfering Data"):
self.copy_broker_table_data(self.broker_source_table_name, self.destination_table_name, self.shared_pk)
def cleanup(self) -> None:
"""Finalize the execution and cleanup for the next script run"""
logger.info(f"Processed {self.upsert_records:,} transction records (insert/update)")
if self.successful_run and (self.is_incremental or self.options["reload_all"]):
logger.info("Updated last run time for next incremental load")
update_last_load_date(self.last_load_record, self.start_time)
if hasattr(self, "file_path") and self.file_path.exists():
# If the script fails before the file is created, skip
# If the file still exists, remove
self.file_path.unlink()
if self.successful_run:
logger.info(f"Loading {self.destination_table_name} completed successfully")
else:
logger.info("Failed state on exit")
raise SystemExit(1)
def compile_transactions_to_process(self) -> Tuple[Path, int]:
ids = []
if self.options["file"]:
ids = self.options["file"]
logger.info("using provided IDs in file")
elif self.options["ids"]:
ids = self.options["ids"]
logger.info("using provided IDs")
else:
ids = self.generate_ids_from_broker()
file_name = f"{self.working_file_prefix}_{self.start_time.strftime("%Y%m%d_%H%M%S_%f")}"
return store_ids_in_file(ids, file_name, is_numeric=False)
def generate_ids_from_broker(self):
sql = self.combine_sql()
with psycopg2.connect(dsn=get_broker_dsn_string()) as connection:
with connection.cursor("usaspending_data_transfer") as cursor:
cursor.execute(sql.strip("\n"))
while True:
id_list = [id[0] for id in cursor.fetchmany(size=self.chunk_size)]
if not id_list:
break
for broker_id in id_list:
yield broker_id
def combine_sql(self):
"""Create SQL used to fetch transaction ids for records marked to transfer"""
if self.options["reload_all"]:
logger.info("FULL RELOAD")
sql = self.broker_full_select_sql
optional_predicate = ""
elif self.options["datetime"]:
logger.info(f"Using datetime '{self.options["datetime"]}'")
sql = self.broker_incremental_select_sql
predicate = f"\"updated_at\" >= '{self.options["datetime"]}'"
if "where" in sql.lower():
optional_predicate = f"and {predicate}"
else:
optional_predicate = f"where {predicate}"
return sql.format(id=self.shared_pk, table=self.broker_source_table_name, optional_predicate=optional_predicate)
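# Illustrative: with --date 2020-01-01 and an incremental SELECT that already
# contains a WHERE clause, optional_predicate becomes (roughly):
#   and "updated_at" >= '2020-01-01 00:00:00'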
def copy_broker_table_data(self, source_tablename, dest_tablename, primary_key):
"""Loop through the batches of IDs and load using the ETL tables"""
destination = ETLTable(dest_tablename)
source = ETLDBLinkTable(source_tablename, settings.DATA_BROKER_DBLINK_NAME, destination.data_types)
transactions_remaining_count = self.total_ids_to_process
for id_list in read_file_for_database_ids(str(self.file_path), self.chunk_size, is_numeric=False):
with Timer(message=f"Upsert {len(id_list):,} records"):
if len(id_list) != 0:
predicate = self.extra_predicate + [{"field": primary_key, "op": "IN", "values": tuple(id_list)}]
record_count = operations.upsert_records_with_predicate(source, destination, predicate, primary_key)
else:
logger.warning("No records to load. Please check parameters and settings to confirm accuracy")
record_count = 0
if transactions_remaining_count > len(id_list):
transactions_remaining_count -= len(id_list)
else:
transactions_remaining_count = 0
self.upsert_records += record_count
logger.info(f"{self.upsert_records:,} successful upserts, {transactions_remaining_count:,} remaining.")
| import logging
import psycopg2
from datetime import datetime, timezone
from django.conf import settings
from django.core.management import call_command
from pathlib import Path
from typing import Tuple
from usaspending_api.broker.helpers.last_load_date import get_last_load_date, update_last_load_date
from usaspending_api.common.etl import ETLDBLinkTable, ETLTable, operations
from usaspending_api.common.helpers.date_helper import datetime_command_line_argument_type
from usaspending_api.common.helpers.sql_helpers import get_broker_dsn_string
from usaspending_api.common.helpers.timing_helpers import ScriptTimer as Timer
from usaspending_api.common.retrieve_file_from_uri import SCHEMA_HELP_TEXT
from usaspending_api.transactions.loader_functions import filepath_command_line_argument_type
from usaspending_api.transactions.loader_functions import read_file_for_database_ids
from usaspending_api.transactions.loader_functions import store_ids_in_file
logger = logging.getLogger("script")
class AgnosticTransactionLoader:
beginning_of_time = "1970-01-01"
chunk_size = 25000
is_incremental = False
successful_run = False
upsert_records = 0
def add_arguments(self, parser):
mutually_exclusive_group = parser.add_mutually_exclusive_group(required=True)
mutually_exclusive_group.add_argument(
"--ids", nargs="+", help=f"Load/Reload transactions using this {self.shared_pk} list (space-separated)",
)
mutually_exclusive_group.add_argument(
"--date",
dest="datetime",
type=datetime_command_line_argument_type(naive=True), # Broker date/times are naive.
help="Load/Reload records from the provided datetime to the script execution start time.",
)
mutually_exclusive_group.add_argument(
"--since-last-load",
dest="incremental_date",
action="store_true",
help="Equivalent to loading from date, but date is drawn from last update date recorded in DB.",
)
mutually_exclusive_group.add_argument(
"--file",
dest="file",
type=filepath_command_line_argument_type(chunk_count=self.chunk_size),
help=(
f"Load/Reload transactions using {self.shared_pk} values stored at this file path"
f" (one ID per line) {SCHEMA_HELP_TEXT}"
),
)
mutually_exclusive_group.add_argument(
"--reload-all",
action="store_true",
help=(
f"Script will load or reload all {self.broker_source_table_name} records from broker database,"
" from all time. This does NOT clear the USASpending database first."
),
)
parser.add_argument(
"--process-deletes",
action="store_true",
help=(
"If not in local mode, process deletes before beginning the upsert operations."
" This shouldn't be used with --file or --ids parameters"
),
)
def handle(self, *args, **options):
with Timer(message="Script"):
self.run_script(*args, **options)
def run_script(self, *args, **options):
self.start_time = datetime.now(timezone.utc)
self.options = options
if self.options["incremental_date"]:
self.is_incremental = True
self.options["datetime"] = self.obtain_last_date()
if self.options["process_deletes"]:
delete_date = self.options["datetime"]
if not delete_date:
delete_date = self.beginning_of_time
with Timer(message="Processing deletes"):
delete_job_status = call_command(self.delete_management_command, f"--date={delete_date}")
if delete_job_status != 0:
raise RuntimeError("Fatal error. Problem with the deletes")
try:
with Timer(message="Load Process"):
self.process()
self.successful_run = True
except (Exception, SystemExit, KeyboardInterrupt):
logger.exception("Fatal error")
finally:
self.cleanup()
def obtain_last_date(self):
dt = get_last_load_date(self.last_load_record, self.lookback_minutes)
if not dt:
raise SystemExit("No datetime stored in the database, unable to use --since-last-load")
return dt
def process(self) -> None:
with Timer(message="Compiling IDs to process"):
self.file_path, self.total_ids_to_process = self.compile_transactions_to_process()
logger.info(f"{self.total_ids_to_process:,} IDs stored")
with Timer(message="Transfering Data"):
self.copy_broker_table_data(self.broker_source_table_name, self.destination_table_name, self.shared_pk)
def cleanup(self) -> None:
"""Finalize the execution and cleanup for the next script run"""
logger.info(f"Processed {self.upsert_records:,} transction records (insert/update)")
if self.successful_run and (self.is_incremental or self.options["reload_all"]):
logger.info("Updated last run time for next incremental load")
update_last_load_date(self.last_load_record, self.start_time)
if hasattr(self, "file_path") and self.file_path.exists():
# If the script fails before the file is created, skip
# If the file still exists, remove
self.file_path.unlink()
if self.successful_run:
logger.info(f"Loading {self.destination_table_name} completed successfully")
else:
logger.info("Failed state on exit")
raise SystemExit(1)
def compile_transactions_to_process(self) -> Tuple[Path, int]:
ids = []
if self.options["file"]:
ids = self.options["file"]
logger.info("using provided IDs in file")
elif self.options["ids"]:
ids = self.options["ids"]
logger.info("using provided IDs")
else:
ids = self.generate_ids_from_broker()
file_name = f"{self.working_file_prefix}_{self.start_time.strftime('%Y%m%d_%H%M%S_%f')}"
return store_ids_in_file(ids, file_name, is_numeric=False)
def generate_ids_from_broker(self):
sql = self.combine_sql()
with psycopg2.connect(dsn=get_broker_dsn_string()) as connection:
with connection.cursor("usaspending_data_transfer") as cursor:
cursor.execute(sql.strip("\n"))
while True:
id_list = [id[0] for id in cursor.fetchmany(size=self.chunk_size)]
if not id_list:
break
for broker_id in id_list:
yield broker_id
def combine_sql(self):
"""Create SQL used to fetch transaction ids for records marked to transfer"""
if self.options["reload_all"]:
logger.info("FULL RELOAD")
sql = self.broker_full_select_sql
optional_predicate = ""
elif self.options["datetime"]:
logger.info(f"Using datetime '{self.options['datetime']}'")
sql = self.broker_incremental_select_sql
predicate = f"\"updated_at\" >= '{self.options['datetime']}'"
if "where" in sql.lower():
optional_predicate = f"and {predicate}"
else:
optional_predicate = f"where {predicate}"
return sql.format(id=self.shared_pk, table=self.broker_source_table_name, optional_predicate=optional_predicate)
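# Illustrative: with --date 2020-01-01 and an incremental SELECT that already
# contains a WHERE clause, optional_predicate becomes (roughly):
#   and "updated_at" >= '2020-01-01 00:00:00'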
def copy_broker_table_data(self, source_tablename, dest_tablename, primary_key):
"""Loop through the batches of IDs and load using the ETL tables"""
destination = ETLTable(dest_tablename)
source = ETLDBLinkTable(source_tablename, settings.DATA_BROKER_DBLINK_NAME, destination.data_types)
transactions_remaining_count = self.total_ids_to_process
for id_list in read_file_for_database_ids(str(self.file_path), self.chunk_size, is_numeric=False):
with Timer(message=f"Upsert {len(id_list):,} records"):
if len(id_list) != 0:
predicate = self.extra_predicate + [{"field": primary_key, "op": "IN", "values": tuple(id_list)}]
record_count = operations.upsert_records_with_predicate(source, destination, predicate, primary_key)
else:
logger.warning("No records to load. Please check parameters and settings to confirm accuracy")
record_count = 0
if transactions_remaining_count > len(id_list):
transactions_remaining_count -= len(id_list)
else:
transactions_remaining_count = 0
self.upsert_records += record_count
logger.info(f"{self.upsert_records:,} successful upserts, {transactions_remaining_count:,} remaining.")
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-01-06 16:12
from typing import List
from elit.common.dataset import SortingSamplerBuilder
from elit.common.transform import NormalizeToken
from elit.components.mtl.loss_balancer import MovingAverageBalancer
from elit.components.mtl.multi_task_learning import MultiTaskLearning
from elit.components.mtl.tasks.constituency import CRFConstituencyParsing
from elit.components.mtl.tasks.dep import BiaffineDependencyParsing
from elit.components.mtl.tasks.ner.biaffine_ner import BiaffineNamedEntityRecognition
from elit.components.mtl.tasks.pos import TransformerTagging
from elit.components.mtl.tasks.srl.rank_srl import SpanRankingSemanticRoleLabeling
from elit.datasets.parsing.ptb import PTB_TOKEN_MAPPING
from elit.datasets.srl.ontonotes5.english import ONTONOTES5_POS_ENGLISH_TRAIN, ONTONOTES5_POS_ENGLISH_TEST, \
ONTONOTES5_POS_ENGLISH_DEV, ONTONOTES5_ENGLISH_TRAIN, ONTONOTES5_ENGLISH_TEST, ONTONOTES5_ENGLISH_DEV, \
ONTONOTES5_CON_ENGLISH_TRAIN, ONTONOTES5_CON_ENGLISH_DEV, ONTONOTES5_CON_ENGLISH_TEST, ONTONOTES5_DEP_ENGLISH_TEST, \
ONTONOTES5_DEP_ENGLISH_DEV, ONTONOTES5_DEP_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_DEV, \
ONTONOTES5_SRL_ENGLISH_TEST
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding
from elit.metrics.mtl import MetricDict
from elit.utils.log_util import cprint
from stem_cell_hypothesis import cdroot
def main():
cdroot()
scores: List[MetricDict] = []
for i in range(3):
tasks = {
'pos': TransformerTagging(
ONTONOTES5_POS_ENGLISH_TRAIN,
ONTONOTES5_POS_ENGLISH_DEV,
ONTONOTES5_POS_ENGLISH_TEST,
SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
lr=1e-3,
),
# 'ner': BiaffineNamedEntityRecognition(
# ONTONOTES5_ENGLISH_TRAIN,
# ONTONOTES5_ENGLISH_DEV,
# ONTONOTES5_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# doc_level_offset=True,
# ),
'srl': SpanRankingSemanticRoleLabeling(
ONTONOTES5_SRL_ENGLISH_TRAIN,
ONTONOTES5_SRL_ENGLISH_DEV,
ONTONOTES5_SRL_ENGLISH_TEST,
SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
lr=1e-3,
doc_level_offset=True,
),
# 'dep': BiaffineDependencyParsing(
# ONTONOTES5_DEP_ENGLISH_TRAIN,
# ONTONOTES5_DEP_ENGLISH_DEV,
# ONTONOTES5_DEP_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# ),
# 'con': CRFConstituencyParsing(
# ONTONOTES5_CON_ENGLISH_TRAIN,
# ONTONOTES5_CON_ENGLISH_DEV,
# ONTONOTES5_CON_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# ),
}
mtl = MultiTaskLearning()
save_dir = f'data/model/mtl/ontonotes_albert_base_pos_srl_en_{i}'
cprint(f'Model will be saved in [cyan]{save_dir}[/cyan]')
mtl.fit(
ContextualWordEmbedding(
'token',
'albert-base-v2',
average_subwords=True,
max_sequence_length=512,
word_dropout=.2,
),
tasks,
save_dir,
30,
lr=1e-3,
encoder_lr=5e-5,
grad_norm=1,
gradient_accumulation=4,
eval_trn=False,
transform=NormalizeToken(PTB_TOKEN_MAPPING, 'token'),
loss_balancer=MovingAverageBalancer(5, intrinsic_weighting=True),
# prefetch=10,
# cache='data/tmp'
)
cprint(f'Model saved in [cyan]{save_dir}[/cyan]')
mtl.load(save_dir)
if 'dep' in mtl.tasks:
mtl['dep'].config.tree = True
mtl['dep'].config.proj = True
mtl.save_config(save_dir)
for k, v in mtl.tasks.items():
v.trn = tasks[k].trn
v.dev = tasks[k].dev
v.tst = tasks[k].tst
metric = mtl.evaluate(save_dir)[0]
scores.append(metric)
print(f'{'-'.join(tasks.keys())} {len(scores)} runs scores:')
for each in scores:
cprint(each.cstr())
if __name__ == '__main__':
import torch
# torch.multiprocessing.set_start_method('spawn') # See https://github.com/pytorch/pytorch/issues/40403
main()
| # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-01-06 16:12
from typing import List
from elit.common.dataset import SortingSamplerBuilder
from elit.common.transform import NormalizeToken
from elit.components.mtl.loss_balancer import MovingAverageBalancer
from elit.components.mtl.multi_task_learning import MultiTaskLearning
from elit.components.mtl.tasks.constituency import CRFConstituencyParsing
from elit.components.mtl.tasks.dep import BiaffineDependencyParsing
from elit.components.mtl.tasks.ner.biaffine_ner import BiaffineNamedEntityRecognition
from elit.components.mtl.tasks.pos import TransformerTagging
from elit.components.mtl.tasks.srl.rank_srl import SpanRankingSemanticRoleLabeling
from elit.datasets.parsing.ptb import PTB_TOKEN_MAPPING
from elit.datasets.srl.ontonotes5.english import ONTONOTES5_POS_ENGLISH_TRAIN, ONTONOTES5_POS_ENGLISH_TEST, \
ONTONOTES5_POS_ENGLISH_DEV, ONTONOTES5_ENGLISH_TRAIN, ONTONOTES5_ENGLISH_TEST, ONTONOTES5_ENGLISH_DEV, \
ONTONOTES5_CON_ENGLISH_TRAIN, ONTONOTES5_CON_ENGLISH_DEV, ONTONOTES5_CON_ENGLISH_TEST, ONTONOTES5_DEP_ENGLISH_TEST, \
ONTONOTES5_DEP_ENGLISH_DEV, ONTONOTES5_DEP_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_DEV, \
ONTONOTES5_SRL_ENGLISH_TEST
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding
from elit.metrics.mtl import MetricDict
from elit.utils.log_util import cprint
from stem_cell_hypothesis import cdroot
def main():
cdroot()
scores: List[MetricDict] = []
for i in range(3):
tasks = {
'pos': TransformerTagging(
ONTONOTES5_POS_ENGLISH_TRAIN,
ONTONOTES5_POS_ENGLISH_DEV,
ONTONOTES5_POS_ENGLISH_TEST,
SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
lr=1e-3,
),
# 'ner': BiaffineNamedEntityRecognition(
# ONTONOTES5_ENGLISH_TRAIN,
# ONTONOTES5_ENGLISH_DEV,
# ONTONOTES5_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# doc_level_offset=True,
# ),
'srl': SpanRankingSemanticRoleLabeling(
ONTONOTES5_SRL_ENGLISH_TRAIN,
ONTONOTES5_SRL_ENGLISH_DEV,
ONTONOTES5_SRL_ENGLISH_TEST,
SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
lr=1e-3,
doc_level_offset=True,
),
# 'dep': BiaffineDependencyParsing(
# ONTONOTES5_DEP_ENGLISH_TRAIN,
# ONTONOTES5_DEP_ENGLISH_DEV,
# ONTONOTES5_DEP_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# ),
# 'con': CRFConstituencyParsing(
# ONTONOTES5_CON_ENGLISH_TRAIN,
# ONTONOTES5_CON_ENGLISH_DEV,
# ONTONOTES5_CON_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# ),
}
mtl = MultiTaskLearning()
save_dir = f'data/model/mtl/ontonotes_albert_base_pos_srl_en_{i}'
cprint(f'Model will be saved in [cyan]{save_dir}[/cyan]')
mtl.fit(
ContextualWordEmbedding(
'token',
'albert-base-v2',
average_subwords=True,
max_sequence_length=512,
word_dropout=.2,
),
tasks,
save_dir,
30,
lr=1e-3,
encoder_lr=5e-5,
grad_norm=1,
gradient_accumulation=4,
eval_trn=False,
transform=NormalizeToken(PTB_TOKEN_MAPPING, 'token'),
loss_balancer=MovingAverageBalancer(5, intrinsic_weighting=True),
# prefetch=10,
# cache='data/tmp'
)
cprint(f'Model saved in [cyan]{save_dir}[/cyan]')
mtl.load(save_dir)
if 'dep' in mtl.tasks:
mtl['dep'].config.tree = True
mtl['dep'].config.proj = True
mtl.save_config(save_dir)
for k, v in mtl.tasks.items():
v.trn = tasks[k].trn
v.dev = tasks[k].dev
v.tst = tasks[k].tst
metric = mtl.evaluate(save_dir)[0]
scores.append(metric)
print(f'{"-".join(tasks.keys())} {len(scores)} runs scores:')
for each in scores:
cprint(each.cstr())
if __name__ == '__main__':
import torch
# torch.multiprocessing.set_start_method('spawn') # See https://github.com/pytorch/pytorch/issues/40403
main()
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = ["SystemDSContext"]
import copy
import json
import os
import socket
import threading
import time
from glob import glob
from queue import Empty, Queue
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep
from typing import Dict, Iterable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from py4j.java_gateway import GatewayParameters, JavaGateway
from py4j.protocol import Py4JNetworkError
from systemds.operator import Frame, Matrix, OperationNode, Scalar, Source
from systemds.script_building import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
from systemds.utils.helpers import get_module_dir
class SystemDSContext(object):
"""A context with a connection to a java instance with which SystemDS operations are executed.
The java process is started and is running using a random tcp port for instruction parsing."""
java_gateway: JavaGateway
def __init__(self, port: int = -1):
"""Starts a new instance of SystemDSContext, in which the connection to a JVM systemds instance is handled
Any new instance of this SystemDS Context, would start a separate new JVM.
Standard out and standard error form the JVM is also handled in this class, filling up Queues,
that can be read from to get the printed statements from the JVM.
"""
command = self.__build_startup_command()
process, port = self.__try_startup(command, port)
# Handle Std out from the subprocess.
self.__stdout = Queue()
self.__stderr = Queue()
self.__stdout_thread = Thread(target=self.__enqueue_output, args=(
process.stdout, self.__stdout), daemon=True)
self.__stderr_thread = Thread(target=self.__enqueue_output, args=(
process.stderr, self.__stderr), daemon=True)
self.__stdout_thread.start()
self.__stderr_thread.start()
# Py4j connect to the started process.
gwp = GatewayParameters(port=port, eager_load=True)
self.java_gateway = JavaGateway(
gateway_parameters=gwp, java_process=process)
def get_stdout(self, lines: int = -1):
"""Getter for the stdout of the java subprocess
The output is taken from the stdout queue and returned in a new list.
:param lines: The number of lines to try to read from the stdout queue.
default -1 returns all lines currently in the queue.
"""
if lines == -1 or self.__stdout.qsize() < lines:
return [self.__stdout.get() for x in range(self.__stdout.qsize())]
else:
return [self.__stdout.get() for x in range(lines)]
def get_stderr(self, lines: int = -1):
"""Getter for the stderr of the java subprocess
The output is taken from the stderr queue and returned in a new list.
:param lines: The number of lines to try to read from the stderr queue.
default -1 returns all lines currently in the queue.
"""
if lines == -1 or self.__stderr.qsize() < lines:
return [self.__stderr.get() for x in range(self.__stderr.qsize())]
else:
return [self.__stderr.get() for x in range(lines)]
def exception_and_close(self, e: Exception):
"""
Method for printing exception, printing stdout and error, while also closing the context correctly.
:param e: the exception thrown
"""
# e = sys.exc_info()[0]
message = "Exception Encountered! closing JVM\n"
message += "standard out :\n" + "\n".join(self.get_stdout())
message += "standard error :\n" + "\n".join(self.get_stdout())
message += "Exception : " + str(e)
self.close()
raise RuntimeError(message)
def __try_startup(self, command, port, rep=0):
""" Try to perform startup of system.
:param command: The command to execute for starting JMLC content
:param port: The port to try to connect to.
:param rep: The number of repeated tries to start up the JVM.
"""
if port == -1:
assignedPort = self.__get_open_port()
elif rep == 0:
assignedPort = port
else:
assignedPort = self.__get_open_port()
fullCommand = []
fullCommand.extend(command)
fullCommand.append(str(assignedPort))
process = Popen(fullCommand, stdout=PIPE, stdin=PIPE, stderr=PIPE)
try:
self.__verify_startup(process)
return process, assignedPort
except Exception as e:
self.close()
if rep > 3:
raise Exception(
"Failed to start SystemDS context with " + str(rep) + " repeated tries")
else:
rep += 1
print("Failed to startup JVM process, retrying: " + str(rep))
sleep(0.5)
return self.__try_startup(command, port, rep)
def __verify_startup(self, process):
first_stdout = process.stdout.readline()
if(not b"GatewayServer Started" in first_stdout):
stderr = process.stderr.readline().decode("utf-8")
if(len(stderr) > 1):
raise Exception(
"Exception in startup of GatewayServer: " + stderr)
outputs = []
outputs.append(first_stdout.decode("utf-8"))
max_tries = 10
for i in range(max_tries):
next_line = process.stdout.readline()
if(b"GatewayServer Started" in next_line):
print("WARNING: Stdout corrupted by prints: " + str(outputs))
print("Startup success")
break
else:
outputs.append(next_line)
if (i == max_tries-1):
raise Exception("Error in startup of systemDS gateway process: \n gateway StdOut: " + str(
outputs) + " \n gateway StdErr" + process.stderr.readline().decode("utf-8"))
def __build_startup_command(self):
command = ["java", "-cp"]
root = os.environ.get("SYSTEMDS_ROOT")
if root is None:
# If there is no SystemDS install, default to the pip-packaged java files.
root = os.path.join(get_module_dir(), "systemds-java")
# 'nt' means Windows.
cp_separator = ";" if os.name == "nt" else ":"
if os.environ.get("SYSTEMDS_ROOT") is not None:
lib_cp = os.path.join(root, "target", "lib", "*")
systemds_cp = os.path.join(root, "target", "SystemDS.jar")
classpath = cp_separator.join([lib_cp, systemds_cp])
command.append(classpath)
files = glob(os.path.join(root, "conf", "log4j*.properties"))
if len(files) > 1:
print(
"WARNING: Multiple logging files found; selecting: " + files[0])
if len(files) == 0:
print("WARNING: No log4j file found at: "
+ os.path.join(root, "conf")
+ " therefore using default settings")
else:
command.append("-Dlog4j.configuration=file:" + files[0])
else:
lib_cp = os.path.join(root, "lib", "*")
command.append(lib_cp)
command.append("org.apache.sysds.api.PythonDMLScript")
return command
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# no errors to handle to allow continuation
return None
def close(self):
"""Close the connection to the java process and do necessary cleanup."""
if(self.__stdout_thread.is_alive()):
self.__stdout_thread.join(0)
if(self.__stderr_thread.is_alive()):
self.__stderr_thread.join(0)
pid = self.java_gateway.java_process.pid
if self.java_gateway.java_gateway_server is not None:
try:
self.java_gateway.shutdown(True)
except Py4JNetworkError as e:
if "Gateway is not connected" not in str(e):
self.java_gateway.java_process.kill()
os.kill(pid, 14)
def __enqueue_output(self, out, queue):
"""Method for handling the output from java.
The string handling is placed in a separate thread, since 'out.readline' is a blocking call.
"""
for line in iter(out.readline, b""):
queue.put(line.decode("utf-8").strip())
def __get_open_port(self):
"""Get a random available port.
and hope that no other process steals it while we wait for the JVM to startup
"""
# https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def full(self, shape: Tuple[int, int], value: Union[float, int]) -> 'Matrix':
"""Generates a matrix completely filled with a value
:param sds_context: SystemDS context
:param shape: shape (rows and cols) of the matrix TODO tensor
:param value: the value to fill all cells with
:return: the OperationNode representing this operation
"""
unnamed_input_nodes = [value]
named_input_nodes = {'rows': shape[0], 'cols': shape[1]}
return Matrix(self, 'matrix', unnamed_input_nodes, named_input_nodes)
def seq(self, start: Union[float, int], stop: Union[float, int] = None,
step: Union[float, int] = 1) -> 'Matrix':
"""Create a single column vector with values from `start` to `stop` and an increment of `step`.
If no stop is defined and only one parameter is given, then start will be 0 and the parameter will be interpreted as
stop.
:param sds_context: SystemDS context
:param start: the starting value
:param stop: the maximum value
:param step: the step size
:return: the OperationNode representing this operation
"""
if stop is None:
stop = start
start = 0
unnamed_input_nodes = [start, stop, step]
return Matrix(self, 'seq', unnamed_input_nodes)
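# Illustrative sketch (hedged: this assumes DML's seq is end-inclusive):
#   sds.seq(3)         -> column vector [0, 1, 2, 3]
#   sds.seq(1, 10, 2)  -> column vector [1, 3, 5, 7, 9]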
def rand(self, rows: int, cols: int,
min: Union[float, int] = None, max: Union[float, int] = None, pdf: str = "uniform",
sparsity: Union[float, int] = None, seed: Union[float, int] = None,
lambd: Union[float, int] = 1) -> 'Matrix':
"""Generates a matrix filled with random values
:param sds_context: SystemDS context
:param rows: number of rows
:param cols: number of cols
:param min: min value for cells
:param max: max value for cells
:param pdf: "uniform"/"normal"/"poisson" distribution
:param sparsity: fraction of non-zero cells
:param seed: random seed
:param lambd: lambda value for "poisson" distribution
:return: the OperationNode representing this operation
"""
available_pdfs = ["uniform", "normal", "poisson"]
if rows < 0:
raise ValueError("In rand statement, can only assign rows a long (integer) value >= 0 "
"-- attempted to assign value: {r}".format(r=rows))
if cols < 0:
raise ValueError("In rand statement, can only assign cols a long (integer) value >= 0 "
"-- attempted to assign value: {c}".format(c=cols))
if pdf not in available_pdfs:
raise ValueError("The pdf passed is invalid! given: {g}, expected: {e}".format(
g=pdf, e=available_pdfs))
pdf = '\"' + pdf + '\"'
named_input_nodes = {
'rows': rows, 'cols': cols, 'pdf': pdf, 'lambda': lambd}
if min is not None:
named_input_nodes['min'] = min
if max is not None:
named_input_nodes['max'] = max
if sparsity is not None:
named_input_nodes['sparsity'] = sparsity
if seed is not None:
named_input_nodes['seed'] = seed
return Matrix(self, 'rand', [], named_input_nodes=named_input_nodes)
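# Illustrative sketch of a rand call (parameter values are arbitrary examples):
#   m = sds.rand(5, 5, min=0.0, max=1.0, pdf="uniform", sparsity=0.5, seed=42)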
def read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
""" Read an file from disk. Supportted types include:
CSV, Matrix Market(coordinate), Text(i,j,v), SystemDS Binay
See: http://apache.github.io/systemds/site/dml-language-reference#readwrite-built-in-functions for more details
:return: an Operation Node, containing the read data.
"""
mdt_filepath = str(path) + ".mtd"
if os.path.exists(mdt_filepath):
with open(mdt_filepath) as jspec_file:
mtd = json.load(jspec_file)
kwargs["data_type"] = mtd["data_type"]
data_type = kwargs.get("data_type", None)
file_format = kwargs.get("format", None)
if data_type == "matrix":
kwargs["data_type"] = f'"{data_type}"'
return Matrix(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
elif data_type == "frame":
kwargs["data_type"] = f'"{data_type}"'
if isinstance(file_format, str):
kwargs["format"] = f'"{kwargs['format']}"'
return Frame(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
elif data_type == "scalar":
kwargs["data_type"] = f'"{data_type}"'
output_type = OutputType.from_str(kwargs.get("value_type", None))
kwargs["value_type"] = f'"{output_type.name}"'
return Scalar(self, "read", [f'"{path}"'], named_input_nodes=kwargs, output_type=output_type)
print("WARNING: Unknown type read please add a mtd file, or specify in arguments")
return OperationNode(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
def scalar(self, v: VALID_INPUT_TYPES) -> 'Scalar':
""" Construct a scalar value; it can contain strings, floats, doubles, integers, and booleans.
:return: An `OperationNode` containing the scalar value.
"""
if type(v) is str:
if not ((v[0] == '"' and v[-1] == '"') or (v[0] == "'" and v[-1] == "'")):
v = f'"{v}"'
# output type assign simply assigns the given variable to the value
# therefore the output type is assign.
return Scalar(self, v, assign=True, output_type=OutputType.from_str(v))
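# Illustrative sketch: both literals become Scalar nodes; bare strings are
# wrapped in quotes above so DML sees a string literal:
#   s1 = sds.scalar(3.2)
#   s2 = sds.scalar("hello")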
def from_numpy(self, mat: np.ndarray,
*args: Sequence[VALID_INPUT_TYPES],
**kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""Generate DAGNode representing matrix with data given by a numpy array, which will be sent to SystemDS
on need.
:param mat: the numpy array
:param args: unnamed parameters
:param kwargs: named parameters
"""
unnamed_params = ['\'./tmp/{file_name}\'']
if len(mat.shape) == 2:
named_params = {'rows': mat.shape[0], 'cols': mat.shape[1]}
elif len(mat.shape) == 1:
named_params = {'rows': mat.shape[0], 'cols': 1}
else:
# TODO Support tensors.
raise ValueError("Only two dimensional arrays supported")
unnamed_params.extend(args)
named_params.update(kwargs)
return Matrix(self, 'read', unnamed_params, named_params, local_data=mat)
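# Illustrative sketch (assumes OperationNode's compute() as elsewhere):
#   import numpy as np
#   arr = np.arange(6).reshape(2, 3)
#   res = sds.from_numpy(arr).compute()   # round-trips the array through SystemDS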
def from_pandas(self, df: pd.DataFrame,
*args: Sequence[VALID_INPUT_TYPES], **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Frame:
"""Generate DAGNode representing frame with data given by a pandas dataframe, which will be sent to SystemDS
on need.
:param df: the pandas dataframe
:param args: unnamed parameters
:param kwargs: named parameters
"""
unnamed_params = ["'./tmp/{file_name}'"]
if len(df.shape) == 2:
named_params = {'rows': df.shape[0], 'cols': df.shape[1]}
elif len(df.shape) == 1:
named_params = {'rows': df.shape[0], 'cols': 1}
else:
# TODO Support tensors.
raise ValueError("Only two dimensional arrays supported")
unnamed_params.extend(args)
named_params["data_type"] = '"frame"'
self._pd_dataframe = df
named_params.update(kwargs)
return Frame(self, "read", unnamed_params, named_params, local_data=df)
def federated(self, addresses: Iterable[str],
ranges: Iterable[Tuple[Iterable[int], Iterable[int]]], *args,
**kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""Create federated matrix object.
:param sds_context: the SystemDS context
:param addresses: addresses of the federated workers
:param ranges: for each federated worker a pair of begin and end index of their held matrix
:param args: unnamed params
:param kwargs: named params
:return: the OperationNode representing this operation
"""
addresses_str = 'list(' + \
','.join(map(lambda s: f'"{s}"', addresses)) + ')'
ranges_str = 'list('
for begin, end in ranges:
ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))}),'
ranges_str = ranges_str[:-1]
ranges_str += ')'
named_params = {'addresses': addresses_str, 'ranges': ranges_str}
named_params.update(kwargs)
return Matrix(self, 'federated', args, named_params)
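# Illustrative sketch: two workers each holding a 2x2 tile of a 4x2 matrix
# (addresses and index ranges are hypothetical):
#   fed = sds.federated(["localhost:8001", "localhost:8002"],
#                       [([0, 0], [2, 2]), ([2, 0], [4, 2])])
# This renders the DML arguments as addresses=list("localhost:8001","localhost:8002")
# and ranges=list(list(0,0), list(2,2), list(2,0), list(4,2)).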
def source(self, path: str, name: str, print_imported_methods: bool = False):
"""Import methods from a given dml file.
The import is done through the DML command source, which adds all methods defined in
the script to the Source object returned in Python. This gives the flexibility to call the methods
directly on the returned object.
In SystemDS, a method called func_01 can then be imported and called using
```python
res = self.sds.source("PATH_TO_FILE", "UNIQUE_NAME").func_01().compute(verbose = True)
```
:param path: The absolute or relative path to the file to import
:param name: The name to give the imported file in the script, this name must be unique
:param print_imported_methods: boolean specifying if the imported methods should be printed.
"""
return Source(self, path, name, print_imported_methods)
| # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = ["SystemDSContext"]
import copy
import json
import os
import socket
import threading
import time
from glob import glob
from queue import Empty, Queue
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep
from typing import Dict, Iterable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from py4j.java_gateway import GatewayParameters, JavaGateway
from py4j.protocol import Py4JNetworkError
from systemds.operator import Frame, Matrix, OperationNode, Scalar, Source
from systemds.script_building import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
from systemds.utils.helpers import get_module_dir
class SystemDSContext(object):
"""A context with a connection to a java instance with which SystemDS operations are executed.
The java process is started and runs on a random TCP port for instruction parsing."""
java_gateway: JavaGateway
def __init__(self, port: int = -1):
"""Starts a new instance of SystemDSContext, in which the connection to a JVM systemds instance is handled
Any new instance of this SystemDS Context, would start a separate new JVM.
Standard out and standard error form the JVM is also handled in this class, filling up Queues,
that can be read from to get the printed statements from the JVM.
"""
command = self.__build_startup_command()
process, port = self.__try_startup(command, port)
# Handle Std out from the subprocess.
self.__stdout = Queue()
self.__stderr = Queue()
self.__stdout_thread = Thread(target=self.__enqueue_output, args=(
process.stdout, self.__stdout), daemon=True)
self.__stderr_thread = Thread(target=self.__enqueue_output, args=(
process.stderr, self.__stderr), daemon=True)
self.__stdout_thread.start()
self.__stderr_thread.start()
# Py4j connect to the started process.
gwp = GatewayParameters(port=port, eager_load=True)
self.java_gateway = JavaGateway(
gateway_parameters=gwp, java_process=process)
def get_stdout(self, lines: int = -1):
"""Getter for the stdout of the java subprocess
The output is taken from the stdout queue and returned in a new list.
:param lines: The number of lines to try to read from the stdout queue.
default -1 returns all lines currently in the queue.
"""
if lines == -1 or self.__stdout.qsize() < lines:
return [self.__stdout.get() for x in range(self.__stdout.qsize())]
else:
return [self.__stdout.get() for x in range(lines)]
def get_stderr(self, lines: int = -1):
"""Getter for the stderr of the java subprocess
The output is taken from the stderr queue and returned in a new list.
:param lines: The number of lines to try to read from the stderr queue.
default -1 returns all lines currently in the queue.
"""
if lines == -1 or self.__stderr.qsize() < lines:
return [self.__stderr.get() for x in range(self.__stderr.qsize())]
else:
return [self.__stderr.get() for x in range(lines)]
def exception_and_close(self, e: Exception):
"""
Method for printing exception, printing stdout and error, while also closing the context correctly.
:param e: the exception thrown
"""
# e = sys.exc_info()[0]
message = "Exception Encountered! closing JVM\n"
message += "standard out :\n" + "\n".join(self.get_stdout())
message += "standard error :\n" + "\n".join(self.get_stdout())
message += "Exception : " + str(e)
self.close()
raise RuntimeError(message)
def __try_startup(self, command, port, rep=0):
""" Try to perform startup of system.
:param command: The command to execute for starting JMLC content
:param port: The port to try to connect to.
:param rep: The number of repeated tries to start up the JVM.
"""
if port == -1:
assignedPort = self.__get_open_port()
elif rep == 0:
assignedPort = port
else:
assignedPort = self.__get_open_port()
fullCommand = []
fullCommand.extend(command)
fullCommand.append(str(assignedPort))
process = Popen(fullCommand, stdout=PIPE, stdin=PIPE, stderr=PIPE)
try:
self.__verify_startup(process)
return process, assignedPort
except Exception as e:
self.close()
if rep > 3:
raise Exception(
"Failed to start SystemDS context with " + str(rep) + " repeated tries")
else:
rep += 1
print("Failed to startup JVM process, retrying: " + str(rep))
sleep(0.5)
return self.__try_startup(command, port, rep)
def __verify_startup(self, process):
first_stdout = process.stdout.readline()
if(not b"GatewayServer Started" in first_stdout):
stderr = process.stderr.readline().decode("utf-8")
if(len(stderr) > 1):
raise Exception(
"Exception in startup of GatewayServer: " + stderr)
outputs = []
outputs.append(first_stdout.decode("utf-8"))
max_tries = 10
for i in range(max_tries):
next_line = process.stdout.readline()
if(b"GatewayServer Started" in next_line):
print("WARNING: Stdout corrupted by prints: " + str(outputs))
print("Startup success")
break
else:
outputs.append(next_line)
if (i == max_tries-1):
raise Exception("Error in startup of systemDS gateway process: \n gateway StdOut: " + str(
outputs) + " \n gateway StdErr" + process.stderr.readline().decode("utf-8"))
def __build_startup_command(self):
command = ["java", "-cp"]
root = os.environ.get("SYSTEMDS_ROOT")
if root is None:
# If there is no SystemDS install, default to the pip-packaged java files.
root = os.path.join(get_module_dir(), "systemds-java")
# 'nt' means Windows.
cp_separator = ";" if os.name == "nt" else ":"
if os.environ.get("SYSTEMDS_ROOT") is not None:
lib_cp = os.path.join(root, "target", "lib", "*")
systemds_cp = os.path.join(root, "target", "SystemDS.jar")
classpath = cp_separator.join([lib_cp, systemds_cp])
command.append(classpath)
files = glob(os.path.join(root, "conf", "log4j*.properties"))
if len(files) > 1:
print(
"WARNING: Multiple logging files found; selecting: " + files[0])
if len(files) == 0:
print("WARNING: No log4j file found at: "
+ os.path.join(root, "conf")
+ " therefore using default settings")
else:
command.append("-Dlog4j.configuration=file:" + files[0])
else:
lib_cp = os.path.join(root, "lib", "*")
command.append(lib_cp)
command.append("org.apache.sysds.api.PythonDMLScript")
return command
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# no errors to handle to allow continuation
return None
def close(self):
"""Close the connection to the java process and do necessary cleanup."""
if(self.__stdout_thread.is_alive()):
self.__stdout_thread.join(0)
if(self.__stderr_thread.is_alive()):
self.__stderr_thread.join(0)
pid = self.java_gateway.java_process.pid
if self.java_gateway.java_gateway_server is not None:
try:
self.java_gateway.shutdown(True)
except Py4JNetworkError as e:
if "Gateway is not connected" not in str(e):
self.java_gateway.java_process.kill()
os.kill(pid, 14)
def __enqueue_output(self, out, queue):
"""Method for handling the output from java.
The string handling is placed in a separate thread, since 'out.readline' is a blocking call.
"""
for line in iter(out.readline, b""):
queue.put(line.decode("utf-8").strip())
def __get_open_port(self):
"""Get a random available port.
and hope that no other process steals it while we wait for the JVM to startup
"""
# https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def full(self, shape: Tuple[int, int], value: Union[float, int]) -> 'Matrix':
"""Generates a matrix completely filled with a value
:param sds_context: SystemDS context
:param shape: shape (rows and cols) of the matrix TODO tensor
:param value: the value to fill all cells with
:return: the OperationNode representing this operation
"""
unnamed_input_nodes = [value]
named_input_nodes = {'rows': shape[0], 'cols': shape[1]}
return Matrix(self, 'matrix', unnamed_input_nodes, named_input_nodes)
def seq(self, start: Union[float, int], stop: Union[float, int] = None,
step: Union[float, int] = 1) -> 'Matrix':
"""Create a single column vector with values from `start` to `stop` and an increment of `step`.
If no stop is defined and only one parameter is given, then start will be 0 and the parameter will be interpreted as
stop.
:param sds_context: SystemDS context
:param start: the starting value
:param stop: the maximum value
:param step: the step size
:return: the OperationNode representing this operation
"""
if stop is None:
stop = start
start = 0
unnamed_input_nodes = [start, stop, step]
return Matrix(self, 'seq', unnamed_input_nodes)
def rand(self, rows: int, cols: int,
min: Union[float, int] = None, max: Union[float, int] = None, pdf: str = "uniform",
sparsity: Union[float, int] = None, seed: Union[float, int] = None,
lambd: Union[float, int] = 1) -> 'Matrix':
"""Generates a matrix filled with random values
:param sds_context: SystemDS context
:param rows: number of rows
:param cols: number of cols
:param min: min value for cells
:param max: max value for cells
:param pdf: "uniform"/"normal"/"poisson" distribution
:param sparsity: fraction of non-zero cells
:param seed: random seed
:param lambd: lambda value for "poisson" distribution
:return: the OperationNode representing this operation
"""
available_pdfs = ["uniform", "normal", "poisson"]
if rows < 0:
raise ValueError("In rand statement, can only assign rows a long (integer) value >= 0 "
"-- attempted to assign value: {r}".format(r=rows))
if cols < 0:
raise ValueError("In rand statement, can only assign cols a long (integer) value >= 0 "
"-- attempted to assign value: {c}".format(c=cols))
if pdf not in available_pdfs:
raise ValueError("The pdf passed is invalid! given: {g}, expected: {e}".format(
g=pdf, e=available_pdfs))
pdf = '\"' + pdf + '\"'
named_input_nodes = {
'rows': rows, 'cols': cols, 'pdf': pdf, 'lambda': lambd}
if min is not None:
named_input_nodes['min'] = min
if max is not None:
named_input_nodes['max'] = max
if sparsity is not None:
named_input_nodes['sparsity'] = sparsity
if seed is not None:
named_input_nodes['seed'] = seed
return Matrix(self, 'rand', [], named_input_nodes=named_input_nodes)
def read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
""" Read an file from disk. Supportted types include:
CSV, Matrix Market(coordinate), Text(i,j,v), SystemDS Binay
See: http://apache.github.io/systemds/site/dml-language-reference#readwrite-built-in-functions for more details
:return: an Operation Node, containing the read data.
"""
mdt_filepath = str(path) + ".mtd"
if os.path.exists(mdt_filepath):
with open(mdt_filepath) as jspec_file:
mtd = json.load(jspec_file)
kwargs["data_type"] = mtd["data_type"]
data_type = kwargs.get("data_type", None)
file_format = kwargs.get("format", None)
if data_type == "matrix":
kwargs["data_type"] = f'"{data_type}"'
return Matrix(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
elif data_type == "frame":
kwargs["data_type"] = f'"{data_type}"'
if isinstance(file_format, str):
kwargs["format"] = f'"{kwargs["format"]}"'
return Frame(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
elif data_type == "scalar":
kwargs["data_type"] = f'"{data_type}"'
output_type = OutputType.from_str(kwargs.get("value_type", None))
kwargs["value_type"] = f'"{output_type.name}"'
return Scalar(self, "read", [f'"{path}"'], named_input_nodes=kwargs, output_type=output_type)
print("WARNING: Unknown type read please add a mtd file, or specify in arguments")
return OperationNode(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
def scalar(self, v: VALID_INPUT_TYPES) -> 'Scalar':
""" Construct a scalar value; it can contain strings, floats, doubles, integers, and booleans.
:return: An `OperationNode` containing the scalar value.
"""
if type(v) is str:
if not ((v[0] == '"' and v[-1] == '"') or (v[0] == "'" and v[-1] == "'")):
v = f'"{v}"'
# output type assign simply assigns the given variable to the value
# therefore the output type is assign.
return Scalar(self, v, assign=True, output_type=OutputType.from_str(v))
def from_numpy(self, mat: np.ndarray,
*args: Sequence[VALID_INPUT_TYPES],
**kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""Generate DAGNode representing matrix with data given by a numpy array, which will be sent to SystemDS
on need.
:param mat: the numpy array
:param args: unnamed parameters
:param kwargs: named parameters
"""
unnamed_params = ['\'./tmp/{file_name}\'']
if len(mat.shape) == 2:
named_params = {'rows': mat.shape[0], 'cols': mat.shape[1]}
elif len(mat.shape) == 1:
named_params = {'rows': mat.shape[0], 'cols': 1}
else:
# TODO Support tensors.
raise ValueError("Only two dimensional arrays supported")
unnamed_params.extend(args)
named_params.update(kwargs)
return Matrix(self, 'read', unnamed_params, named_params, local_data=mat)
def from_pandas(self, df: pd.DataFrame,
*args: Sequence[VALID_INPUT_TYPES], **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Frame:
"""Generate DAGNode representing frame with data given by a pandas dataframe, which will be sent to SystemDS
on need.
:param df: the pandas dataframe
:param args: unnamed parameters
:param kwargs: named parameters
"""
unnamed_params = ["'./tmp/{file_name}'"]
if len(df.shape) == 2:
named_params = {'rows': df.shape[0], 'cols': df.shape[1]}
elif len(df.shape) == 1:
named_params = {'rows': df.shape[0], 'cols': 1}
else:
# TODO Support tensors.
raise ValueError("Only two dimensional arrays supported")
unnamed_params.extend(args)
named_params["data_type"] = '"frame"'
self._pd_dataframe = df
named_params.update(kwargs)
return Frame(self, "read", unnamed_params, named_params, local_data=df)
def federated(self, addresses: Iterable[str],
ranges: Iterable[Tuple[Iterable[int], Iterable[int]]], *args,
**kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""Create federated matrix object.
:param sds_context: the SystemDS context
:param addresses: addresses of the federated workers
:param ranges: for each federated worker a pair of begin and end index of their held matrix
:param args: unnamed params
:param kwargs: named params
:return: the OperationNode representing this operation
"""
addresses_str = 'list(' + \
','.join(map(lambda s: f'"{s}"', addresses)) + ')'
ranges_str = 'list('
for begin, end in ranges:
ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))}),'
ranges_str = ranges_str[:-1]
ranges_str += ')'
named_params = {'addresses': addresses_str, 'ranges': ranges_str}
named_params.update(kwargs)
return Matrix(self, 'federated', args, named_params)
def source(self, path: str, name: str, print_imported_methods: bool = False):
"""Import methods from a given dml file.
The import is done through the DML command source, which adds all methods defined in
the script to the Source object returned in Python. This gives the flexibility to call the methods
directly on the returned object.
In SystemDS, a method called func_01 can then be imported and called using
```python
res = self.sds.source("PATH_TO_FILE", "UNIQUE_NAME").func_01().compute(verbose = True)
```
:param path: The absolute or relative path to the file to import
:param name: The name to give the imported file in the script, this name must be unique
:param print_imported_methods: boolean specifying if the imported methods should be printed.
"""
return Source(self, path, name, print_imported_methods)
|
#!/usr/bin/env python
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (successful) {
put_a_to_python
if (successful) {
put_b_to_python
if (successful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import pprint
import sys
import time
import copy
from .auxfuncs import *
from . import capi_maps
from .capi_maps import *
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
errmess = sys.stderr.write
outmess = sys.stdout.write
show = pprint.pprint
options={}
sepdict={}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr','method',
'pyobjfrom','closepyobjfrom',
'freemem',
'userincludes',
'includes0','includes','typedefs','typedefs_generated',
'cppmacros','cfuncs','callbacks',
'latexdoc',
'restdoc',
'routine_defs','externroutines',
'initf2pywraphooks',
'commonhooks','initcommonhooks',
'f90modhooks','initf90modhooks']:
sepdict[k]='\n'
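# The rule dictionaries below are string templates: '#keyword#' placeholders
# are substituted per module/routine by the rule-application machinery later
# in this file. A sketch of the substitution (names are illustrative): with
# modulename='mymod' and a routine named 'dot', the template
# 'f2py_rout_#modulename#_#name#' expands to 'f2py_rout_mymod_dot'.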
#################### Rules for C/API module #################
module_rules={
'modulebody':"""\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <pearu@cens.ioc.ee>.
* See http://cens.ioc.ee/projects/f2py2e/
* Generation date: """+time.asctime(time.localtime(time.time()))+"""
* $R"""+"""evision:$
* $D"""+"""ate:$
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
"""+gentitle("See f2py2e/cfuncs.py: includes")+"""
#includes#
#includes0#
"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+"""
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
"""+gentitle("See f2py2e/cfuncs.py: typedefs")+"""
#typedefs#
"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+"""
#typedefs_generated#
"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+"""
#cppmacros#
"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+"""
#cfuncs#
"""+gentitle("See f2py2e/cfuncs.py: userincludes")+"""
#userincludes#
"""+gentitle("See f2py2e/capi_rules.py: usercode")+"""
#usercode#
/* See f2py2e/rules.py */
#externroutines#
"""+gentitle("See f2py2e/capi_rules.py: usercode1")+"""
#usercode1#
"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+"""
#callbacks#
"""+gentitle("See f2py2e/rules.py: buildapi")+"""
#body#
"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+"""
#f90modhooks#
"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+"""
"""+gentitle("See f2py2e/common_rules.py: buildhooks")+"""
#commonhooks#
"""+gentitle("See f2py2e/rules.py")+"""
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
#endif
#if PY_VERSION_HEX >= 0x03000000
#define RETVAL m
PyMODINIT_FUNC PyInit_#modulename#(void) {
#else
#define RETVAL
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
\tPyObject *m,*d, *s;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
#endif
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R"""+"""evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
\ts = PyString_FromString(
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn RETVAL;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor':{'latexdoc':'\n\n',
'restdoc':'\n\n'},
'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc':['Module #modulename#\n'+'='*80,
'\n#restdoc#']
}
defmod_rules=[
{'body':'/*eof body*/',
'method':'/*eof method*/',
'externroutines':'/*eof externroutines*/',
'routine_defs':'/*eof routine_defs*/',
'initf90modhooks':'/*eof initf90modhooks*/',
'initf2pywraphooks':'/*eof initf2pywraphooks*/',
'initcommonhooks':'/*eof initcommonhooks*/',
'latexdoc':'',
'restdoc':'',
'modnote':{hasnote:'#note#',l_not(hasnote):''},
}
]
routine_rules={
'separatorsfor':sepdict,
'body':"""
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
'routine_defs':'#routine_def#',
'initf2pywraphooks':'#initf2pywraphook#',
'externroutines':'#declfortranroutine#',
'doc':'#docreturn##name#(#docsignature#)',
'docshort':'#docreturn##name#(#docsignatureshort#)',
'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n',
'need':['arrayobject.h','CFUNCSMESS','MINMAX'],
'cppmacros':{debugcapi:'#define DEBUGCFUNCS'},
'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc':['Wrapped function ``#name#``\n'+'-'*80,
]
}
################## Rules for C/API function ##############
rout_rules=[
{ # Init
'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n',
'routdebugleave':'\n','routdebugfailure':'\n',
'setjmpbuf':' || ',
'docstrreq':'\n','docstropt':'\n','docstrout':'\n',
'docstrcbs':'\n','docstrsigns':'\\n"\n"',
'latexdocstrsigns':'\n',
'latexdocstrreq':'\n','latexdocstropt':'\n',
'latexdocstrout':'\n','latexdocstrcbs':'\n',
},
'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'',
'docsign':'','docsignopt':'','decl':'/*decl*/',
'freemem':'/*freemem*/',
'docsignshort':'','docsignoptshort':'',
'docstrsigns':'','latexdocstrsigns':'',
'docstrreq':'\\nParameters\\n----------',
'docstropt':'\\nOther Parameters\\n----------------',
'docstrout':'\\nReturns\\n-------',
'docstrcbs':'\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq':'\\noindent Required arguments:',
'latexdocstropt':'\\noindent Optional arguments:',
'latexdocstrout':'\\noindent Return objects:',
'latexdocstrcbs':'\\noindent Call-back functions:',
'args_capi':'','keys_capi':'','functype':'',
'frompyobj':'/*frompyobj*/',
'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed
'pyobjfrom':'/*pyobjfrom*/',
'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed
'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/',
'routdebugenter':'/*routdebugenter*/',
'routdebugfailure':'/*routdebugfailure*/',
'callfortranroutine':'/*callfortranroutine*/',
'argformat':'','keyformat':'','need_cfuncs':'',
'docreturn':'','return':'','returnformat':'','rformat':'',
'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'',
'initf2pywraphook':'',
'routnote':{hasnote:'--- #note#',l_not(hasnote):''},
},{
'apiname':'f2py_rout_#modulename#_#name#',
'pyname':'#modulename#.#name#',
'decl':'',
'_check':l_not(ismoduleroutine)
},{
'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname':'#modulename#.#f90modulename#.#name#',
'decl':'',
'_check':ismoduleroutine
},{ # Subroutine
'functype':'void',
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine:'',
isdummyroutine:''
},
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals:"""\t\t}"""}
],
'_check':l_and(issubroutine,l_not(issubroutine_wrap)),
},{ # Wrapped function
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':isfunction_wrap,
},{ # Wrapped subroutine
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':issubroutine_wrap,
},{ # Function
'functype':'#ctype#',
'docreturn':{l_not(isintent_hide):'#rname#,'},
'docstrout':'#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote:'--- #resultnote#'}],
'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi,l_not(isstringfunction)):"""\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check':l_and(isfunction,l_not(isfunction_wrap))
},{ # Scalar function
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine:''
},
'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine':[
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'},
{l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need':[{l_not(isdummyroutine):'F_FUNC'},
{iscomplexfunction:'pyobj_from_#ctype#1'},
{islong_longfunction:'long_long'},
{islong_doublefunction:'long_double'}],
'returnformat':{l_not(isintent_hide):'#rformat#'},
'return':{iscomplexfunction:',#name#_return_value_capi',
l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'},
'_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap))
},{ # String function # in use for --no-wrap
'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)):
# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},',
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c):
# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},'
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl':['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe:'\t\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t\t}'},
{debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat':'#rformat#',
'return':',#name#_return_value',
'freemem':'\tSTRINGFREE(#name#_return_value);',
'need':['F_FUNC','#ctype#','STRINGFREE'],
'_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check':debugcapi
}
]
################ Rules for arguments ##################
typedef_need_dict = {islong_long:'long_long',
islong_double:'long_double',
islong_complex:'complex_long_double',
isunsigned_char:'unsigned_char',
isunsigned_short:'unsigned_short',
isunsigned:'unsigned',
isunsigned_long_long:'unsigned_long_long',
isunsigned_chararray:'unsigned_char',
isunsigned_shortarray:'unsigned_short',
isunsigned_long_longarray:'unsigned_long_long',
issigned_long_longarray:'long_long',
}
aux_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing auxiliary variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'need':typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'need':{hasinitvalue:'math.h'},
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'_check':l_and(isscalar,l_not(iscomplex)),
},
{
'return':',#varname#',
'docstrout':'#pydocsignout#',
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':l_and(isscalar,l_not(iscomplex),isintent_out),
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':iscomplex
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray
},
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
arg_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'_depend':'',
'need':typedef_need_dict,
},
# Doc signatures
{
'docstropt':{l_and(isoptional,isintent_nothide):'#pydocsign#'},
'docstrreq':{l_and(isrequired,isintent_nothide):'#pydocsign#'},
'docstrout':{isintent_out:'#pydocsignout#'},
'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote,isintent_hide):'--- #note#',
l_and(hasnote,isintent_nothide):'--- See above.'}]},
'depend':''
},
# Required/Optional arguments
{
'kwlist':'"#varname#",',
'docsign':'#varname#,',
'_check':l_and(isintent_nothide,l_not(isoptional))
},
{
'kwlistopt':'"#varname#",',
'docsignopt':'#varname#=#showinit#,',
'docsignoptshort':'#varname#,',
'_check':l_and(isintent_nothide,isoptional)
},
# Docstring/BuildValue
{
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs':'#cbdocstr#',
'latexdocstrcbs':'\\item[] #cblatexdocstr#',
'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl':['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'xaformat':{isintent_nothide:'O!'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
'need':['#cbname#','setjmp.h'],
'_check':isexternal
},
{
'frompyobj':[{l_not(isintent_callback):"""\
if(F2PyCapsule_Check(#varname#_capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""},{isintent_callback:"""\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp)
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
else
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
## {l_not(isintent_callback):"""\
## if (#varname#_capi==Py_None) {
## printf(\"hoi\\n\");
## }
## """},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi:["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need':['SWAP','create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'return':{isintent_out:',#varname#'},
'_check':l_and(isscalar,l_not(iscomplex))
},{
'need':{hasinitvalue:'math.h'},
'_check':l_and(isscalar,l_not(iscomplex)),
#'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:"""\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide)
},{
'frompyobj':[
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
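      # For illustration (hypothetical required int argument `n` with
      # init value 0 in `mod.f()`), these fragments combine to roughly:
      #     if (n_capi == Py_None) n = 0; else
      #         f2py_success = int_from_pyobj(&n,n_capi,
      #             "mod.f() 1st argument (n) can't be converted to int");
      #     if (f2py_success) {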
{hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend':''},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)',
'_depend':''},
{l_not(islogical):'''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical:'''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/',
'need':{l_not(islogical):'#ctype#_from_pyobj'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide),
'_depend':''
# },{ # Hidden
# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide)
},{ # Hidden
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'need':typedef_need_dict,
'_check':l_and(isscalar,l_not(iscomplex),isintent_hide),
'_depend':''
},{ # Common
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check':l_and(isscalar,l_not(iscomplex)),
'_depend':''
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return':{isintent_out:',#varname#_capi'},
'_check':iscomplex
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'pyobjfrom':{isintent_inout:"""\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check':l_and(iscomplex,isintent_nothide)
},{
'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'},
# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");'
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/',
'need':['#ctype#_from_pyobj'],
'_check':l_and(iscomplex,isintent_nothide),
'_depend':''
},{ # Hidden
'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'},
'_check':l_and(iscomplex,isintent_hide)
},{
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':l_and(iscomplex,isintent_hide),
'_depend':''
},{ # Common
'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need':['pyobj_from_#ctype#1'],
'_check':iscomplex
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check':iscomplex,
'_depend':''
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# 'freemem':'\tSTRINGFREE(#varname#);',
'return':{isintent_out:',#varname#'},
'need':['len..'],#'STRINGFREE'],
'_check':isstring
},{ # Common
'frompyobj':"""\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj':"""\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need':['#ctype#_from_pyobj','len..','STRINGFREE'],
'_check':isstring,
'_depend':''
},{ # Not hidden
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:'''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isstring,isintent_nothide)
},{ # Hidden
'_check':l_and(isstring,isintent_hide)
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check':isstring,
'_depend':''
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out:',capi_#varname#_tmp'},
'need':'len..',
'_check':isarray
},{ # intent(overwrite) array
'decl':'\tint capi_overwrite_#varname# = 1;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=1,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 1',
'_check':l_and(isarray,isintent_overwrite),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_overwrite),
'_depend':'',
},
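    # The pair of rules above exposes an extra 'overwrite_<varname>'
    # keyword (default 1) in the wrapper's Python signature; when it is
    # false, F2PY_INTENT_COPY makes the wrapper operate on a copy of the
    # input array. The intent(copy) pair below is identical except that
    # the default is 0.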
{ # intent(copy) array
'decl':'\tint capi_overwrite_#varname# = 0;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=0,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 0',
'_check':l_and(isarray,isintent_copy),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_copy),
'_depend':'',
},{
'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray,
'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
# 'pyobjfrom':{isintent_inout:"""\
# /* Partly because of the following hack, intent(inout) is deprecated,
# Use intent(in,out) instead.
# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
# \t\t} else
# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
# \t}
# */
# """},
# 'need':{isintent_inout:'copy_ND_array'},
'_check':l_and(isarray,isintent_nothide)
},{
'frompyobj':['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
""",
{hasinitvalue:[
{isintent_nothide:'\tif (#varname#_capi == Py_None) {'},
{isintent_hide:'\t{'},
{iscomplexarray:'\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tif (!PyErr_Occurred())
\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj':[ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out,isintent_hide)):"""\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'},
],
'_check':isarray,
'_depend':''
},
# { # Hidden
# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'},
# '_check':l_and(isarray,isintent_hide)
# },
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
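# A minimal sketch (mirroring the loops in buildapi() below) of how the
# rule dictionaries above are consumed: a rule contributes only when its
# optional '_check' predicate accepts the variable, rules carrying
# '_depend' are applied in a second pass, and '_break' stops further
# matching for that variable:
#
#     for r in arg_rules:
#         if '_check' not in r or r['_check'](var[a]):
#             ar = applyrules(r, vrd, var[a])  # substitutes '#varname#' etc.
#             rd = dictappend(rd, ar)
#             if '_break' in r:
#                 break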
################# Rules for checking ###############
check_rules=[
{
'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need':'len..'
},{
'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/',
'need':'CHECKSCALAR',
'_check':l_and(isscalar,l_not(iscomplex)),
'_break':''
},{
'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/',
'need':'CHECKSTRING',
'_check':isstring,
'_break':''
},{
'need':'CHECKARRAY',
'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/',
'_check':isarray,
'_break':''
},{
'need':'CHECKGENERIC',
'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/',
}
]
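# For a variable declared with, e.g., check(n>0), the CHECKSCALAR rule
# above expands to C along the lines of (illustrative; '#varshowvalue#'
# shown here as "n=%d" for an int):
#     CHECKSCALAR(n>0,"n>0","1st argument n","n=%d",n) {
#     ...
#     } /*CHECKSCALAR(n>0)*/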
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m,um):
"""
    Build the C/API module for `m` and return a dict of generated file
    names: 'csrc' (C/API module source) and, when produced, 'fsrc'
    (Fortran wrapper source) and 'ltx' (LaTeX documentation).
"""
global f2py_version,options
outmess('\tBuilding module "%s"...\n'%(m['name']))
ret = {}
mod_rules=defmod_rules[:]
vrd=modsign2map(m)
rd=dictappend({'f2py_version':f2py_version},vrd)
funcwrappers = []
funcwrappers2 = [] # F90 codes
for n in m['interfaced']:
nb=None
for bi in m['body']:
            if bi['block'] != 'interface':
errmess('buildmodule: Expected interface block. Skipping.\n')
continue
for b in bi['body']:
                if b['name'] == n:
                    nb = b
                    break
if not nb:
            errmess('buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n'%(n))
continue
nb_list = [nb]
if 'entry' in nb:
for k,a in nb['entry'].items():
nb1 = copy.deepcopy(nb)
del nb1['entry']
nb1['name'] = k
nb1['args'] = a
nb_list.append(nb1)
for nb in nb_list:
api,wrap=buildapi(nb)
if wrap:
if ismoduleroutine(nb):
funcwrappers2.append(wrap)
else:
funcwrappers.append(wrap)
ar=applyrules(api,vrd)
rd=dictappend(rd,ar)
# Construct COMMON block support
cr,wrap = common_rules.buildhooks(m)
if wrap:
funcwrappers.append(wrap)
ar=applyrules(cr,vrd)
rd=dictappend(rd,ar)
# Construct F90 module support
mr,wrap = f90mod_rules.buildhooks(m)
if wrap:
funcwrappers2.append(wrap)
ar=applyrules(mr,vrd)
rd=dictappend(rd,ar)
for u in um:
ar=use_rules.buildusevars(u,m['use'][u['name']])
rd=dictappend(rd,ar)
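    # Resolve the 'need' keys collected while applying the rules into
    # actual C snippets: each key is looked up in the cfuncs tables
    # (includes, typedefs, cppmacros, cfuncs, callbacks and hooks) and
    # the matching code is appended to the module source.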
needs=cfuncs.get_needs()
code={}
for n in needs.keys():
code[n]=[]
for k in needs[n]:
c=''
if k in cfuncs.includes0:
c=cfuncs.includes0[k]
elif k in cfuncs.includes:
c=cfuncs.includes[k]
elif k in cfuncs.userincludes:
c=cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c=cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c=cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c=cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c=cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c=cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c=cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c=cfuncs.commonhooks[k]
else:
                errmess('buildmodule: unknown need %s.\n'%(repr(k)))
                continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar=applyrules(r,vrd,m)
rd=dictappend(rd,ar)
ar=applyrules(module_rules,rd)
fn = os.path.join(options['buildpath'],vrd['coutput'])
ret['csrc'] = fn
f=open(fn,'w')
f.write(ar['modulebody'].replace('\t',2*' '))
f.close()
outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'],fn))
if options['dorestdoc']:
fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest')
f=open(fn,'w')
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
f.close()
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex')
ret['ltx'] = fn
f=open(fn,'w')
f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version))
if 'shortlatex' not in options:
f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
f.close()
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'],vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
f=open(wn,'w')
f.write('C -*- fortran -*-\n')
f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('C It contains Fortran 77 wrappers to fortran functions.\n')
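        # Fold each generated wrapper line into Fortran 77 fixed form:
        # long statements are cut at column 66 and continued on the next
        # line, flagged with the '&' continuation marker.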
lines = []
for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'):
if l and l[0]==' ':
while len(l)>=66:
lines.append(l[:66]+'\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n','\n')
f.write(lines)
f.close()
outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn))
if funcwrappers2:
wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename']))
ret['fsrc'] = wn
f=open(wn,'w')
f.write('! -*- f90 -*-\n')
f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('! It contains Fortran 90 wrappers to fortran functions.\n')
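        # Free-form Fortran 90 wrapping: over-long lines end with '&'
        # and continue on a following line that also starts with '&'.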
lines = []
for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'):
if len(l)>72 and l[0]==' ':
lines.append(l[:72]+'&\n &')
l = l[72:]
while len(l)>66:
lines.append(l[:66]+'&\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n','\n')
f.write(lines)
f.close()
outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn))
return ret
################## Build C/API function #############
stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'}
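# Maps the last digit of an argument position to its English ordinal
# suffix, e.g. repr(2)+stnd[2%10] == '2nd' (note that 11, 12 and 13
# come out as '11st', '12nd', '13rd').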
def buildapi(rout):
rout,wrap = func2subr.assubr(rout)
args,depargs=getargs2(rout)
capi_maps.depargs=depargs
var=rout['vars']
auxvars = [a for a in var.keys() if isintent_aux(var[a])]
if ismoduleroutine(rout):
outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name']))
else:
outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name']))
# Routine
vrd=routsign2map(rout)
rd=dictappend({},vrd)
for r in rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar=applyrules(r,vrd,rout)
rd=dictappend(rd,ar)
# Args
nth,nthk=0,0
savevrd={}
for a in args:
vrd=sign2map(a,var[a])
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth=nth+1
vrd['nth']=repr(nth)+stnd[nth%10]+' argument'
else:
nthk=nthk+1
vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword'
else: vrd['nth']='hidden'
savevrd[a]=vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd=savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check']=c
ar=applyrules(check_rules,vrd,var[a])
rd=dictappend(rd,ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
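    # The cleanup/close fragments were appended in the order the
    # corresponding C scopes were opened; reversing the lists emits the
    # closing '}' blocks in LIFO order, matching the nested
    # 'if (f2py_success) {' blocks opened by the frompyobj/pyobjfrom
    # rules.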
rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign':rd['docsign'],
'docsignopt':rd['docsignopt'],
'docsignxa':rd['docsignxa']}))
optargs=stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa':rd['docsignxashort'],
'docsignopt':rd['docsignoptshort']}
))
if optargs=='':
rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
else:
rd['docsignatureshort']=replace('#docsign#[#docsignopt#]',
{'docsign':rd['docsign'],
'docsignopt':optargs,
})
rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_')
rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ')
cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
if len(rd['callfortranappend'])>1:
rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
else:
rd['callcompaqfortran']=cfs
rd['callfortran']=cfs
if isinstance(rd['docreturn'], list):
rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = '
rd['docstrsigns']=[]
rd['latexdocstrsigns']=[]
for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns']=rd['docstrsigns']+rd[k]
k='latex'+k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
['\\begin{description}']+rd[k][1:]+\
['\\end{description}']
# Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
if rd['keyformat'] or rd['xaformat']:
argformat = rd['argformat']
if isinstance(argformat, list):
argformat.append('|')
else:
assert isinstance(argformat, str),repr((argformat, type(argformat)))
rd['argformat'] += '|'
ar=applyrules(routine_rules,rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n'%(ar['docshort']))
else:
outmess('\t\t %s\n'%(ar['docshort']))
return ar,wrap
#################### EOF rules.py #######################
| #!/usr/bin/env python
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (succesful) {
put_a_to_python
if (succesful) {
put_b_to_python
if (succesful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import pprint
import sys
import time
import copy
from .auxfuncs import *
from . import capi_maps
from .capi_maps import *
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
errmess = sys.stderr.write
outmess = sys.stdout.write
show = pprint.pprint
options={}
sepdict={}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr','method',
'pyobjfrom','closepyobjfrom',
'freemem',
'userincludes',
'includes0','includes','typedefs','typedefs_generated',
'cppmacros','cfuncs','callbacks',
'latexdoc',
'restdoc',
'routine_defs','externroutines',
'initf2pywraphooks',
'commonhooks','initcommonhooks',
'f90modhooks','initf90modhooks']:
sepdict[k]='\n'
#################### Rules for C/API module #################
module_rules={
'modulebody':"""\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <pearu@cens.ioc.ee>.
* See http://cens.ioc.ee/projects/f2py2e/
* Generation date: """+time.asctime(time.localtime(time.time()))+"""
* $R"""+"""evision:$
* $D"""+"""ate:$
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
"""+gentitle("See f2py2e/cfuncs.py: includes")+"""
#includes#
#includes0#
"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+"""
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
"""+gentitle("See f2py2e/cfuncs.py: typedefs")+"""
#typedefs#
"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+"""
#typedefs_generated#
"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+"""
#cppmacros#
"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+"""
#cfuncs#
"""+gentitle("See f2py2e/cfuncs.py: userincludes")+"""
#userincludes#
"""+gentitle("See f2py2e/capi_rules.py: usercode")+"""
#usercode#
/* See f2py2e/rules.py */
#externroutines#
"""+gentitle("See f2py2e/capi_rules.py: usercode1")+"""
#usercode1#
"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+"""
#callbacks#
"""+gentitle("See f2py2e/rules.py: buildapi")+"""
#body#
"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+"""
#f90modhooks#
"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+"""
"""+gentitle("See f2py2e/common_rules.py: buildhooks")+"""
#commonhooks#
"""+gentitle("See f2py2e/rules.py")+"""
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
#endif
#if PY_VERSION_HEX >= 0x03000000
#define RETVAL m
PyMODINIT_FUNC PyInit_#modulename#(void) {
#else
#define RETVAL
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
\tPyObject *m,*d, *s;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
#endif
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R"""+"""evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
\ts = PyString_FromString(
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn RETVAL;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor':{'latexdoc':'\n\n',
'restdoc':'\n\n'},
'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc':['Module #modulename#\n'+'='*80,
'\n#restdoc#']
}
defmod_rules=[
{'body':'/*eof body*/',
'method':'/*eof method*/',
'externroutines':'/*eof externroutines*/',
'routine_defs':'/*eof routine_defs*/',
'initf90modhooks':'/*eof initf90modhooks*/',
'initf2pywraphooks':'/*eof initf2pywraphooks*/',
'initcommonhooks':'/*eof initcommonhooks*/',
'latexdoc':'',
'restdoc':'',
'modnote':{hasnote:'#note#',l_not(hasnote):''},
}
]
routine_rules={
'separatorsfor':sepdict,
'body':"""
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
'routine_defs':'#routine_def#',
'initf2pywraphooks':'#initf2pywraphook#',
'externroutines':'#declfortranroutine#',
'doc':'#docreturn##name#(#docsignature#)',
'docshort':'#docreturn##name#(#docsignatureshort#)',
'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n',
'need':['arrayobject.h','CFUNCSMESS','MINMAX'],
'cppmacros':{debugcapi:'#define DEBUGCFUNCS'},
'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc':['Wrapped function ``#name#``\n'+'-'*80,
]
}
################## Rules for C/API function ##############
rout_rules=[
{ # Init
'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n',
'routdebugleave':'\n','routdebugfailure':'\n',
'setjmpbuf':' || ',
'docstrreq':'\n','docstropt':'\n','docstrout':'\n',
'docstrcbs':'\n','docstrsigns':'\\n"\n"',
'latexdocstrsigns':'\n',
'latexdocstrreq':'\n','latexdocstropt':'\n',
'latexdocstrout':'\n','latexdocstrcbs':'\n',
},
'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'',
'docsign':'','docsignopt':'','decl':'/*decl*/',
'freemem':'/*freemem*/',
'docsignshort':'','docsignoptshort':'',
'docstrsigns':'','latexdocstrsigns':'',
'docstrreq':'\\nParameters\\n----------',
'docstropt':'\\nOther Parameters\\n----------------',
'docstrout':'\\nReturns\\n-------',
'docstrcbs':'\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq':'\\noindent Required arguments:',
'latexdocstropt':'\\noindent Optional arguments:',
'latexdocstrout':'\\noindent Return objects:',
'latexdocstrcbs':'\\noindent Call-back functions:',
'args_capi':'','keys_capi':'','functype':'',
'frompyobj':'/*frompyobj*/',
'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed
'pyobjfrom':'/*pyobjfrom*/',
'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed
'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/',
'routdebugenter':'/*routdebugenter*/',
'routdebugfailure':'/*routdebugfailure*/',
'callfortranroutine':'/*callfortranroutine*/',
'argformat':'','keyformat':'','need_cfuncs':'',
'docreturn':'','return':'','returnformat':'','rformat':'',
'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'',
'initf2pywraphook':'',
'routnote':{hasnote:'--- #note#',l_not(hasnote):''},
},{
'apiname':'f2py_rout_#modulename#_#name#',
'pyname':'#modulename#.#name#',
'decl':'',
'_check':l_not(ismoduleroutine)
},{
'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname':'#modulename#.#f90modulename#.#name#',
'decl':'',
'_check':ismoduleroutine
},{ # Subroutine
'functype':'void',
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine:'',
isdummyroutine:''
},
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals:"""\t\t}"""}
],
'_check':l_and(issubroutine,l_not(issubroutine_wrap)),
},{ # Wrapped function
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':isfunction_wrap,
},{ # Wrapped subroutine
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':issubroutine_wrap,
},{ # Function
'functype':'#ctype#',
'docreturn':{l_not(isintent_hide):'#rname#,'},
'docstrout':'#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote:'--- #resultnote#'}],
'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi,l_not(isstringfunction)):"""\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check':l_and(isfunction,l_not(isfunction_wrap))
},{ # Scalar function
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine:''
},
'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine':[
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'},
{l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need':[{l_not(isdummyroutine):'F_FUNC'},
{iscomplexfunction:'pyobj_from_#ctype#1'},
{islong_longfunction:'long_long'},
{islong_doublefunction:'long_double'}],
'returnformat':{l_not(isintent_hide):'#rformat#'},
'return':{iscomplexfunction:',#name#_return_value_capi',
l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'},
'_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap))
},{ # String function # in use for --no-wrap
'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)):
# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},',
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c):
# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},'
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl':['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe:'\t\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t\t}'},
{debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat':'#rformat#',
'return':',#name#_return_value',
'freemem':'\tSTRINGFREE(#name#_return_value);',
'need':['F_FUNC','#ctype#','STRINGFREE'],
'_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check':debugcapi
}
]
################ Rules for arguments ##################
typedef_need_dict = {islong_long:'long_long',
islong_double:'long_double',
islong_complex:'complex_long_double',
isunsigned_char:'unsigned_char',
isunsigned_short:'unsigned_short',
isunsigned:'unsigned',
isunsigned_long_long:'unsigned_long_long',
isunsigned_chararray:'unsigned_char',
isunsigned_shortarray:'unsigned_short',
isunsigned_long_longarray:'unsigned_long_long',
issigned_long_longarray:'long_long',
}
aux_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing auxiliary variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'need':typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'need':{hasinitvalue:'math.h'},
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'_check':l_and(isscalar,l_not(iscomplex)),
},
{
'return':',#varname#',
'docstrout':'#pydocsignout#',
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':l_and(isscalar,l_not(iscomplex),isintent_out),
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':iscomplex
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray
},
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
arg_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'_depend':'',
'need':typedef_need_dict,
},
# Doc signatures
{
'docstropt':{l_and(isoptional,isintent_nothide):'#pydocsign#'},
'docstrreq':{l_and(isrequired,isintent_nothide):'#pydocsign#'},
'docstrout':{isintent_out:'#pydocsignout#'},
'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote,isintent_hide):'--- #note#',
l_and(hasnote,isintent_nothide):'--- See above.'}]},
'depend':''
},
# Required/Optional arguments
{
'kwlist':'"#varname#",',
'docsign':'#varname#,',
'_check':l_and(isintent_nothide,l_not(isoptional))
},
{
'kwlistopt':'"#varname#",',
'docsignopt':'#varname#=#showinit#,',
'docsignoptshort':'#varname#,',
'_check':l_and(isintent_nothide,isoptional)
},
# Docstring/BuildValue
{
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs':'#cbdocstr#',
'latexdocstrcbs':'\\item[] #cblatexdocstr#',
'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl':['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'xaformat':{isintent_nothide:'O!'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
'need':['#cbname#','setjmp.h'],
'_check':isexternal
},
{
'frompyobj':[{l_not(isintent_callback):"""\
if(F2PyCapsule_Check(#varname#_capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""},{isintent_callback:"""\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp)
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
else
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
## {l_not(isintent_callback):"""\
## if (#varname#_capi==Py_None) {
## printf(\"hoi\\n\");
## }
## """},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi:["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need':['SWAP','create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'return':{isintent_out:',#varname#'},
'_check':l_and(isscalar,l_not(iscomplex))
},{
'need':{hasinitvalue:'math.h'},
'_check':l_and(isscalar,l_not(iscomplex)),
#'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:"""\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide)
},{
'frompyobj':[
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend':''},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)',
'_depend':''},
{l_not(islogical):'''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical:'''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/',
'need':{l_not(islogical):'#ctype#_from_pyobj'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide),
'_depend':''
# },{ # Hidden
# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide)
},{ # Hidden
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'need':typedef_need_dict,
'_check':l_and(isscalar,l_not(iscomplex),isintent_hide),
'_depend':''
},{ # Common
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check':l_and(isscalar,l_not(iscomplex)),
'_depend':''
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return':{isintent_out:',#varname#_capi'},
'_check':iscomplex
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'pyobjfrom':{isintent_inout:"""\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check':l_and(iscomplex,isintent_nothide)
},{
'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'},
# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");'
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/',
'need':['#ctype#_from_pyobj'],
'_check':l_and(iscomplex,isintent_nothide),
'_depend':''
},{ # Hidden
'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'},
'_check':l_and(iscomplex,isintent_hide)
},{
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':l_and(iscomplex,isintent_hide),
'_depend':''
},{ # Common
'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need':['pyobj_from_#ctype#1'],
'_check':iscomplex
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check':iscomplex,
'_depend':''
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# 'freemem':'\tSTRINGFREE(#varname#);',
'return':{isintent_out:',#varname#'},
'need':['len..'],#'STRINGFREE'],
'_check':isstring
},{ # Common
'frompyobj':"""\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj':"""\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need':['#ctype#_from_pyobj','len..','STRINGFREE'],
'_check':isstring,
'_depend':''
},{ # Not hidden
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:'''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isstring,isintent_nothide)
},{ # Hidden
'_check':l_and(isstring,isintent_hide)
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check':isstring,
'_depend':''
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out:',capi_#varname#_tmp'},
'need':'len..',
'_check':isarray
},{ # intent(overwrite) array
'decl':'\tint capi_overwrite_#varname# = 1;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=1,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 1',
'_check':l_and(isarray,isintent_overwrite),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_overwrite),
'_depend':'',
},
{ # intent(copy) array
'decl':'\tint capi_overwrite_#varname# = 0;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=0,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 0',
'_check':l_and(isarray,isintent_copy),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_copy),
'_depend':'',
},{
'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray,
'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
# 'pyobjfrom':{isintent_inout:"""\
# /* Partly because of the following hack, intent(inout) is depreciated,
# Use intent(in,out) instead.
# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
# \t\t} else
# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
# \t}
# */
# """},
# 'need':{isintent_inout:'copy_ND_array'},
'_check':l_and(isarray,isintent_nothide)
},{
'frompyobj':['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
""",
{hasinitvalue:[
{isintent_nothide:'\tif (#varname#_capi == Py_None) {'},
{isintent_hide:'\t{'},
{iscomplexarray:'\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tif (!PyErr_Occurred())
\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj':[ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out,isintent_hide)):"""\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'},
],
'_check':isarray,
'_depend':''
},
# { # Hidden
# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'},
# '_check':l_and(isarray,isintent_hide)
# },
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
################# Rules for checking ###############
check_rules=[
{
'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need':'len..'
},{
'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/',
'need':'CHECKSCALAR',
'_check':l_and(isscalar,l_not(iscomplex)),
'_break':''
},{
'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/',
'need':'CHECKSTRING',
'_check':isstring,
'_break':''
},{
'need':'CHECKARRAY',
'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/',
'_check':isarray,
'_break':''
},{
'need':'CHECKGENERIC',
'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/',
}
]
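# Illustrative expansion (a sketch, not captured generator output): for a
# scalar argument `n` carrying a `check(n>=1)` clause, the CHECKSCALAR rule
# above would emit roughly
#
#     CHECKSCALAR(n>=1,"n>=1","1st argument n",...) {
#         /* code using the converted argument */
#     } /*CHECKSCALAR(n>=1)*/
#
# where the 'frompyobj' template opens the block and the matching
# 'cleanupfrompyobj' template (applied in reverse order) closes it.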
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m,um):
"""
    Build the C/API module and return a dict of generated source file paths.
"""
global f2py_version,options
outmess('\tBuilding module "%s"...\n'%(m['name']))
ret = {}
mod_rules=defmod_rules[:]
vrd=modsign2map(m)
rd=dictappend({'f2py_version':f2py_version},vrd)
funcwrappers = []
funcwrappers2 = [] # F90 codes
for n in m['interfaced']:
nb=None
for bi in m['body']:
if not bi['block']=='interface':
errmess('buildmodule: Expected interface block. Skipping.\n')
continue
for b in bi['body']:
if b['name']==n: nb=b;break
if not nb:
            errmess('buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n'%(n))
continue
nb_list = [nb]
if 'entry' in nb:
for k,a in nb['entry'].items():
nb1 = copy.deepcopy(nb)
del nb1['entry']
nb1['name'] = k
nb1['args'] = a
nb_list.append(nb1)
for nb in nb_list:
api,wrap=buildapi(nb)
if wrap:
if ismoduleroutine(nb):
funcwrappers2.append(wrap)
else:
funcwrappers.append(wrap)
ar=applyrules(api,vrd)
rd=dictappend(rd,ar)
# Construct COMMON block support
cr,wrap = common_rules.buildhooks(m)
if wrap:
funcwrappers.append(wrap)
ar=applyrules(cr,vrd)
rd=dictappend(rd,ar)
# Construct F90 module support
mr,wrap = f90mod_rules.buildhooks(m)
if wrap:
funcwrappers2.append(wrap)
ar=applyrules(mr,vrd)
rd=dictappend(rd,ar)
for u in um:
ar=use_rules.buildusevars(u,m['use'][u['name']])
rd=dictappend(rd,ar)
needs=cfuncs.get_needs()
code={}
for n in needs.keys():
code[n]=[]
for k in needs[n]:
c=''
if k in cfuncs.includes0:
c=cfuncs.includes0[k]
elif k in cfuncs.includes:
c=cfuncs.includes[k]
elif k in cfuncs.userincludes:
c=cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c=cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c=cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c=cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c=cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c=cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c=cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c=cfuncs.commonhooks[k]
else:
errmess('buildmodule: unknown need %s.\n'%(repr(k)));continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar=applyrules(r,vrd,m)
rd=dictappend(rd,ar)
ar=applyrules(module_rules,rd)
fn = os.path.join(options['buildpath'],vrd['coutput'])
ret['csrc'] = fn
f=open(fn,'w')
f.write(ar['modulebody'].replace('\t',2*' '))
f.close()
outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'],fn))
if options['dorestdoc']:
fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest')
f=open(fn,'w')
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
f.close()
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex')
ret['ltx'] = fn
f=open(fn,'w')
f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version))
if 'shortlatex' not in options:
f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
f.close()
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'],vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
f=open(wn,'w')
f.write('C -*- fortran -*-\n')
f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('C It contains Fortran 77 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'):
if l and l[0]==' ':
while len(l)>=66:
lines.append(l[:66]+'\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n','\n')
f.write(lines)
f.close()
outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn))
if funcwrappers2:
wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename']))
ret['fsrc'] = wn
f=open(wn,'w')
f.write('! -*- f90 -*-\n')
f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('! It contains Fortran 90 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'):
if len(l)>72 and l[0]==' ':
lines.append(l[:72]+'&\n &')
l = l[72:]
while len(l)>66:
lines.append(l[:66]+'&\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n','\n')
f.write(lines)
f.close()
outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn))
return ret
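# Note on the return value: `ret` maps output kinds to the generated file
# paths: 'csrc' (the C/API module source, always present), 'fsrc' (the
# Fortran wrapper source, when wrappers were needed; the F90 path overwrites
# the F77 path if both exist) and 'ltx' (the LaTeX documentation, when
# options['dolatexdoc'] is set).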
################## Build C/API function #############
stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'}
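# Ordinal-suffix lookup by last digit, e.g. nth=1 -> '1st argument',
# nth=5 -> '5th argument'. (Because only nth % 10 is consulted, 11..13
# would render as '11st', '12nd', '13rd'; arguments that far in are rare.)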
def buildapi(rout):
rout,wrap = func2subr.assubr(rout)
args,depargs=getargs2(rout)
capi_maps.depargs=depargs
var=rout['vars']
auxvars = [a for a in var.keys() if isintent_aux(var[a])]
if ismoduleroutine(rout):
outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name']))
else:
outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name']))
# Routine
vrd=routsign2map(rout)
rd=dictappend({},vrd)
for r in rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar=applyrules(r,vrd,rout)
rd=dictappend(rd,ar)
# Args
nth,nthk=0,0
savevrd={}
for a in args:
vrd=sign2map(a,var[a])
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth=nth+1
vrd['nth']=repr(nth)+stnd[nth%10]+' argument'
else:
nthk=nthk+1
vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword'
else: vrd['nth']='hidden'
savevrd[a]=vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd=savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check']=c
ar=applyrules(check_rules,vrd,var[a])
rd=dictappend(rd,ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign':rd['docsign'],
'docsignopt':rd['docsignopt'],
'docsignxa':rd['docsignxa']}))
optargs=stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa':rd['docsignxashort'],
'docsignopt':rd['docsignoptshort']}
))
if optargs=='':
rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
else:
rd['docsignatureshort']=replace('#docsign#[#docsignopt#]',
{'docsign':rd['docsign'],
'docsignopt':optargs,
})
rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_')
rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ')
cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
if len(rd['callfortranappend'])>1:
rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
else:
rd['callcompaqfortran']=cfs
rd['callfortran']=cfs
if isinstance(rd['docreturn'], list):
rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = '
rd['docstrsigns']=[]
rd['latexdocstrsigns']=[]
for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns']=rd['docstrsigns']+rd[k]
k='latex'+k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
['\\begin{description}']+rd[k][1:]+\
['\\end{description}']
# Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
if rd['keyformat'] or rd['xaformat']:
argformat = rd['argformat']
if isinstance(argformat, list):
argformat.append('|')
else:
assert isinstance(argformat, str),repr((argformat, type(argformat)))
rd['argformat'] += '|'
ar=applyrules(routine_rules,rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n'%(ar['docshort']))
else:
outmess('\t\t %s\n'%(ar['docshort']))
return ar,wrap
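# `ar` carries the rule-expanded C/API fragments for this routine and `wrap`
# the Fortran wrapper source from func2subr.assubr (empty when no wrapper is
# needed); buildmodule() above collects `wrap` into funcwrappers or
# funcwrappers2 depending on ismoduleroutine().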
#################### EOF rules.py #######################
|
import json
import logging
import typing as t
import aiohttp
import discord.ext.commands as commands
import discord.utils as utils
import bot.bot_secrets as bot_secrets
import bot.extensions as ext
log = logging.getLogger(__name__)
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json'
}
MAX_CONTENT_LENGTH = 1900
MAX_LINE_LENGTH = 15
EVAL_COMMAND_COOLDOWN = 2
class EvalCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command(aliases=['e'])
@commands.cooldown(1, EVAL_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
        'Allows for the evaluation of arbitrary python code directly in discord. '
        'Supports all internal standard libraries like json or re'
)
@ext.short_help('Runs arbitrary python code in discord')
@ext.example('eval print("hello world")')
async def eval(self, ctx, *, code) -> None:
code = code.replace('```python', '')
code = code.replace('```py', '')
code = code.replace('`', '')
code = utils.escape_mentions(code)
feedback_mes = await ctx.send('Code execution started')
        log.info('Code: %s sent for evaluation by author: %s in guild: %s',
                 code, ctx.author.id, ctx.guild.id)
output = await self._post_eval(code)
stdout = output['stdout']
stdout = stdout.strip('`')
stdout = utils.escape_mentions(stdout)
await feedback_mes.delete()
if len(stdout) > MAX_CONTENT_LENGTH:
            await ctx.send(f'{ctx.author.mention} Attempted output length exceeds 2000 characters. Please try again')
return
result_emoji = ':white_check_mark:' if output['returncode'] == 0 else ':warning:'
        out = f'{ctx.author.mention} {result_emoji} Eval Completed with response code: {output["returncode"]}'
if stdout:
await ctx.send(f'{out}\n\n```{self._format(stdout)}```')
else:
await ctx.send(f'{out}\n\n```[No Output]```')
def _format(self, resp):
lines = [f'{(i + 1):03d} | {line}' for i, line in enumerate(resp.split('\n')) if line]
if len(lines) > MAX_LINE_LENGTH:
lines = lines[:MAX_LINE_LENGTH]
lines.append('... Output line limit exceeded, data truncated')
return '\n'.join(lines)
    async def _post_eval(self, code) -> t.Union[dict, None]:
data = {
"input": code
}
json_data = json.dumps(data)
async with aiohttp.ClientSession() as s:
async with s.post(bot_secrets.secrets.repl_url,
data=json_data,
headers=HEADERS) as resp:
if resp.status == 200:
return json.loads(await resp.text())
def setup(bot):
bot.add_cog(EvalCog(bot))
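# A sketch of the REPL service contract assumed by this cog (the service
# behind bot_secrets.secrets.repl_url is not shown here): _post_eval POSTs
#
#     {"input": "<code>"}
#
# and expects a JSON reply shaped like
#
#     {"stdout": "hello world\n", "returncode": 0}
#
# which eval() unpacks into the formatted Discord response above.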
| import json
import logging
import typing as t
import aiohttp
import discord.ext.commands as commands
import discord.utils as utils
import bot.bot_secrets as bot_secrets
import bot.extensions as ext
log = logging.getLogger(__name__)
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json'
}
MAX_CONTENT_LENGTH = 1900
MAX_LINE_LENGTH = 15
EVAL_COMMAND_COOLDOWN = 2
class EvalCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command(aliases=['e'])
@commands.cooldown(1, EVAL_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
        'Allows for the evaluation of arbitrary python code directly in discord. '
        'Supports all internal standard libraries like json or re'
)
@ext.short_help('Runs arbitrary python code in discord')
@ext.example('eval print("hello world")')
async def eval(self, ctx, *, code) -> None:
code = code.replace('```python', '')
code = code.replace('```py', '')
code = code.replace('`', '')
code = utils.escape_mentions(code)
feedback_mes = await ctx.send('Code execution started')
        log.info('Code: %s sent for evaluation by author: %s in guild: %s',
                 code, ctx.author.id, ctx.guild.id)
output = await self._post_eval(code)
stdout = output['stdout']
stdout = stdout.strip('`')
stdout = utils.escape_mentions(stdout)
await feedback_mes.delete()
if len(stdout) > MAX_CONTENT_LENGTH:
            await ctx.send(f'{ctx.author.mention} Attempted output length exceeds 2000 characters. Please try again')
return
result_emoji = ':white_check_mark:' if output['returncode'] == 0 else ':warning:'
out = f'{ctx.author.mention} {result_emoji} Eval Completed with response code: {output["returncode"]}'
if stdout:
await ctx.send(f'{out}\n\n```{self._format(stdout)}```')
else:
await ctx.send(f'{out}\n\n```[No Output]```')
def _format(self, resp):
lines = [f'{(i + 1):03d} | {line}' for i, line in enumerate(resp.split('\n')) if line]
if len(lines) > MAX_LINE_LENGTH:
lines = lines[:MAX_LINE_LENGTH]
lines.append('... Output line limit exceeded, data truncated')
return '\n'.join(lines)
    async def _post_eval(self, code) -> t.Union[dict, None]:
data = {
"input": code
}
json_data = json.dumps(data)
async with aiohttp.ClientSession() as s:
async with s.post(bot_secrets.secrets.repl_url,
data=json_data,
headers=HEADERS) as resp:
if resp.status == 200:
return json.loads(await resp.text())
def setup(bot):
bot.add_cog(EvalCog(bot))
|
# Copyright 2020-2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETOS Environment Provider celery task module."""
import os
import uuid
import logging
import traceback
import json
from threading import Lock
from copy import deepcopy
from etos_lib.etos import ETOS
from etos_lib.logging.logger import FORMAT_CONFIG
from jsontas.jsontas import JsonTas
from environment_provider.splitter.split import Splitter
from .lib.celery import APP
from .lib.config import Config
from .lib.test_suite import TestSuite
from .lib.registry import ProviderRegistry
from .lib.json_dumps import JsonDumps
from .lib.uuid_generate import UuidGenerate
from .lib.join import Join
logging.getLogger("pika").setLevel(logging.WARNING)
class NoEventDataFound(Exception):
"""Could not fetch events from event storage."""
class EnvironmentProviderNotConfigured(Exception):
"""Environment provider was not configured prior to request."""
class EnvironmentProvider: # pylint:disable=too-many-instance-attributes
"""Environment provider celery Task."""
logger = logging.getLogger("EnvironmentProvider")
environment_provider_config = None
iut_provider = None
log_area_provider = None
execution_space_provider = None
task_track_started = True
lock = Lock()
def __init__(self, suite_id):
"""Initialize ETOS, dataset, provider registry and splitter.
:param suite_id: Suite ID to get an environment for
:type suite_id: str
"""
self.suite_id = suite_id
FORMAT_CONFIG.identifier = suite_id
self.logger.info("Initializing EnvironmentProvider task.")
self.etos = ETOS(
"ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
)
with self.lock:
# Since celery workers can share memory between them we need to make the configuration
# of ETOS library unique as it uses the memory sharing feature with the internal
# configuration dictionary.
# The impact of not doing this is that the environment provider would re-use
            # another worker's configuration instead of using its own.
self.etos.config.config = deepcopy(
self.etos.config.config
) # pylint:disable=protected-access
self.jsontas = JsonTas()
self.dataset = self.jsontas.dataset
self.dataset.add("json_dumps", JsonDumps)
self.dataset.add("uuid_generate", UuidGenerate)
self.dataset.add("join", Join)
self.registry = ProviderRegistry(self.etos, self.jsontas)
self.splitter = Splitter(self.etos, {})
def configure(self, suite_id):
"""Configure environment provider and start RabbitMQ publisher.
:param suite_id: Suite ID for this task.
:type suite_id: str
"""
self.logger.info("Configure environment provider.")
if not self.registry.wait_for_configuration(suite_id):
# TODO: Add link ref to docs that describe how the config is done.
raise EnvironmentProviderNotConfigured(
"Please do a proper configuration of "
"EnvironmentProvider before requesting an "
"environment."
)
self.logger.info("Registry is configured.")
self.iut_provider = self.registry.iut_provider(suite_id)
self.log_area_provider = self.registry.log_area_provider(suite_id)
self.execution_space_provider = self.registry.execution_space_provider(suite_id)
self.etos.config.set(
"EVENT_DATA_TIMEOUT", int(os.getenv("ETOS_EVENT_DATA_TIMEOUT", "10"))
)
self.etos.config.set(
"WAIT_FOR_IUT_TIMEOUT", int(os.getenv("ETOS_WAIT_FOR_IUT_TIMEOUT", "10"))
)
self.etos.config.set(
"WAIT_FOR_EXECUTION_SPACE_TIMEOUT",
int(os.getenv("ETOS_WAIT_FOR_EXECUTION_SPACE_TIMEOUT", "10")),
)
self.etos.config.set(
"WAIT_FOR_LOG_AREA_TIMEOUT",
int(os.getenv("ETOS_WAIT_FOR_LOG_AREA_TIMEOUT", "10")),
)
self.etos.config.set("SUITE_ID", suite_id)
self.etos.config.rabbitmq_publisher_from_environment()
self.etos.start_publisher()
self.environment_provider_config = Config(self.etos, suite_id)
if not self.environment_provider_config.generated:
missing = [
name
for name, value in [
("tercc", self.environment_provider_config.tercc),
(
"artifact_created",
self.environment_provider_config.artifact_created,
),
(
"activity_triggered",
self.environment_provider_config.activity_triggered,
),
]
if value is None
]
raise NoEventDataFound(f"Missing: {", ".join(missing)}")
self.dataset.add("environment", os.environ)
self.dataset.add("config", self.etos.config)
self.dataset.add("identity", self.environment_provider_config.identity)
self.dataset.add("artifact_id", self.environment_provider_config.artifact_id)
self.dataset.add("context", self.environment_provider_config.context)
self.dataset.add("custom_data", self.environment_provider_config.custom_data)
self.dataset.add("uuid", str(uuid.uuid4()))
self.dataset.add(
"artifact_created", self.environment_provider_config.artifact_created
)
self.dataset.add(
"artifact_published", self.environment_provider_config.artifact_published
)
self.dataset.add("tercc", self.environment_provider_config.tercc)
self.dataset.merge(self.registry.dataset(suite_id))
def cleanup(self):
"""Clean up by checkin in all checked out providers."""
self.logger.info("Cleanup by checking in all checked out providers.")
for provider in self.etos.config.get("PROVIDERS"):
try:
provider.checkin_all()
except: # noqa pylint:disable=bare-except
pass
@staticmethod
def get_constraint(recipe, key):
"""Get a constraint key from an ETOS recipe.
:param recipe: Recipe to get key from.
:type recipe: dict
:param key: Key to get value from, from the constraints.
:type key: str
:return: Constraint value.
:rtype: any
"""
for constraint in recipe.get("constraints", []):
if constraint.get("key") == key:
return constraint.get("value")
return None
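    # Example: get_constraint({"constraints": [{"key": "TEST_RUNNER",
    #                                           "value": "my-runner"}]},
    #                         "TEST_RUNNER") returns "my-runner"; a missing
    # key (or a recipe without constraints) returns None.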
def create_test_suite_dict(self):
"""Create a test suite dictionary based on test runners.
I.e. If there is only one test_runner the dictionary would be::
{
"test_suite_name": {
"MyTestrunner": {
"docker": "MyTestrunner",
"priority": 1,
"unsplit_recipes": [...]
}
}
}
Or two::
{
"test_suite_name": {
"MyTestrunner": {
"docker": "MyTestrunner",
"priority": 1,
"unsplit_recipes": [...]
},
"MyOtherTestrunner": {
"docker": "MyOtherTestrunner",
"priority": 1,
"unsplit_recipes": [...]
}
}
}
etc.
:return: A test suite dictionary based on test runners.
:rtype: dict
"""
self.logger.info("Create new test suite dictionary.")
test_suites = {}
for test_suite in self.environment_provider_config.test_suite:
test_runners = test_suites.setdefault(test_suite.get("name"), {})
for recipe in test_suite.get("recipes", []):
test_runner = self.get_constraint(recipe, "TEST_RUNNER")
test_runners.setdefault(
test_runner,
{
"docker": test_runner,
"priority": test_suite.get("priority"),
"unsplit_recipes": [],
},
)
test_runners[test_runner]["unsplit_recipes"].append(recipe)
return test_suites
def set_total_test_count_and_test_runners(self, test_runners):
"""Set total test count and test runners to be used by the splitter algorithm.
:param test_runners: Dictionary with test_runners as keys.
:type test_runners: dict
"""
total_test_count = 0
for _, data in test_runners.items():
total_test_count += len(data["unsplit_recipes"])
self.etos.config.set("TOTAL_TEST_COUNT", total_test_count)
self.etos.config.set("NUMBER_OF_TESTRUNNERS", len(test_runners.keys()))
def checkout_and_assign_iuts_to_test_runners(self, test_runners):
"""Checkout IUTs from the IUT provider and assign them to the test_runners dictionary.
:param test_runners: Dictionary with test_runners as keys.
:type test_runners: dict
"""
iuts = self.iut_provider.wait_for_and_checkout_iuts(
minimum_amount=self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
maximum_amount=self.etos.config.get("TOTAL_TEST_COUNT"),
)
self.etos.config.set("NUMBER_OF_IUTS", len(iuts))
unused_iuts = self.splitter.assign_iuts(test_runners, self.dataset.get("iuts"))
for iut in unused_iuts:
self.iut_provider.checkin(iut)
def checkout_log_area(self):
"""Checkout a log area.
Called for each executor so only a single log area needs to be checked out.
"""
return self.log_area_provider.wait_for_and_checkout_log_areas(
minimum_amount=1, maximum_amount=1
)
def checkout_and_assign_executors_to_iuts(self, test_runner, iuts):
"""Checkout and assign executors to each available IUT.
:param test_runner: Test runner which will be added to dataset in order for
JSONTas to get more information when running.
:type test_runner: dict
:param iuts: Dictionary of IUTs to assign executors to.
:type iuts: dict
"""
self.dataset.add("test_runner", test_runner)
executors = (
self.execution_space_provider.wait_for_and_checkout_execution_spaces(
minimum_amount=len(iuts),
maximum_amount=len(iuts),
)
)
for iut, suite in iuts.items():
try:
suite["executor"] = executors.pop(0)
except IndexError:
break
self.dataset.add("executor", suite["executor"])
self.dataset.add("iut", iut)
# This index will always exist or 'checkout' would raise an exception.
suite["log_area"] = self.checkout_log_area()[0]
# Checkin the unassigned executors.
for executor in executors:
self.execution_space_provider.checkin(executor)
def checkin_iuts_without_executors(self, iuts):
"""Find all IUTs without an assigned executor and check them in.
:param iuts: IUTs to check for executors.
:type iuts: dict
:return: IUTs that were removed.
:rtype: list
"""
remove = []
for iut, suite in iuts.items():
if suite.get("executor") is None:
self.iut_provider.checkin(iut)
remove.append(iut)
return remove
def verify_json(self, json_data):
"""Verify that JSON data can be serialized properly.
:param json_data: JSON data to test.
:type json_data: str or dict
"""
try:
if isinstance(json_data, dict):
json_data = json.dumps(json_data)
json.loads(json_data)
except (json.decoder.JSONDecodeError, TypeError):
self.logger.error(json_data)
raise
def run(self):
"""Run the environment provider task.
:return: Test suite JSON with assigned IUTs, execution spaces and log areas.
:rtype: dict
"""
try:
self.configure(self.suite_id)
test_suites = self.create_test_suite_dict()
for test_suite_name, test_runners in test_suites.items():
self.set_total_test_count_and_test_runners(test_runners)
self.logger.info(
"Total test count : %r", self.etos.config.get("TOTAL_TEST_COUNT")
)
self.logger.info(
"Total testrunners: %r",
self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
)
self.checkout_and_assign_iuts_to_test_runners(test_runners)
for test_runner, values in test_runners.items():
self.checkout_and_assign_executors_to_iuts(
test_runner, values["iuts"]
)
for iut in self.checkin_iuts_without_executors(values["iuts"]):
values["iuts"].remove(iut)
for sub_suite in test_runners.values():
self.splitter.split(sub_suite)
test_suite = TestSuite(
test_suite_name, test_runners, self.environment_provider_config
)
# This is where the resulting test suite is generated.
# The resulting test suite will be a dictionary with test runners, IUTs
# execution spaces and log areas with tests split up over as many as
# possible. The resulting test suite definition is further explained in
# :obj:`environment_provider.lib.test_suite.TestSuite`
test_suite.generate()
test_suite_json = test_suite.to_json()
# Test that the test suite JSON is serializable so that the
# exception is caught here and not by the webserver.
# This makes sure that we can cleanup if anything breaks.
self.verify_json(test_suite_json)
# TODO: Handle multiple test suites.
return test_suite_json
except Exception as exception: # pylint:disable=broad-except
self.cleanup()
traceback.print_exc()
return {"error": str(exception), "details": traceback.format_exc()}
finally:
if self.etos.publisher is not None:
self.etos.publisher.stop()
@APP.task(name="EnvironmentProvider")
def get_environment(suite_id):
"""Get an environment for ETOS test executions.
:param suite_id: Suite ID to get an environment for
:type suite_id: str
:return: Test suite JSON with assigned IUTs, execution spaces and log areas.
:rtype: dict
"""
environment_provider = EnvironmentProvider(suite_id)
return environment_provider.run()
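# Minimal usage sketch (assumes a running Celery worker/broker and that the
# environment provider was configured for the suite beforehand; "<suite-id>"
# is a placeholder):
#
#     result = get_environment.delay("<suite-id>")  # enqueue on the worker
#     environment = result.get()  # test suite JSON, or {"error": ..., "details": ...}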
| # Copyright 2020-2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETOS Environment Provider celery task module."""
import os
import uuid
import logging
import traceback
import json
from threading import Lock
from copy import deepcopy
from etos_lib.etos import ETOS
from etos_lib.logging.logger import FORMAT_CONFIG
from jsontas.jsontas import JsonTas
from environment_provider.splitter.split import Splitter
from .lib.celery import APP
from .lib.config import Config
from .lib.test_suite import TestSuite
from .lib.registry import ProviderRegistry
from .lib.json_dumps import JsonDumps
from .lib.uuid_generate import UuidGenerate
from .lib.join import Join
logging.getLogger("pika").setLevel(logging.WARNING)
class NoEventDataFound(Exception):
"""Could not fetch events from event storage."""
class EnvironmentProviderNotConfigured(Exception):
"""Environment provider was not configured prior to request."""
class EnvironmentProvider: # pylint:disable=too-many-instance-attributes
"""Environment provider celery Task."""
logger = logging.getLogger("EnvironmentProvider")
environment_provider_config = None
iut_provider = None
log_area_provider = None
execution_space_provider = None
task_track_started = True
lock = Lock()
def __init__(self, suite_id):
"""Initialize ETOS, dataset, provider registry and splitter.
:param suite_id: Suite ID to get an environment for
:type suite_id: str
"""
self.suite_id = suite_id
FORMAT_CONFIG.identifier = suite_id
self.logger.info("Initializing EnvironmentProvider task.")
self.etos = ETOS(
"ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
)
with self.lock:
# Since celery workers can share memory between them we need to make the configuration
# of ETOS library unique as it uses the memory sharing feature with the internal
# configuration dictionary.
# The impact of not doing this is that the environment provider would re-use
            # another worker's configuration instead of using its own.
self.etos.config.config = deepcopy(
self.etos.config.config
) # pylint:disable=protected-access
self.jsontas = JsonTas()
self.dataset = self.jsontas.dataset
self.dataset.add("json_dumps", JsonDumps)
self.dataset.add("uuid_generate", UuidGenerate)
self.dataset.add("join", Join)
self.registry = ProviderRegistry(self.etos, self.jsontas)
self.splitter = Splitter(self.etos, {})
def configure(self, suite_id):
"""Configure environment provider and start RabbitMQ publisher.
:param suite_id: Suite ID for this task.
:type suite_id: str
"""
self.logger.info("Configure environment provider.")
if not self.registry.wait_for_configuration(suite_id):
# TODO: Add link ref to docs that describe how the config is done.
raise EnvironmentProviderNotConfigured(
"Please do a proper configuration of "
"EnvironmentProvider before requesting an "
"environment."
)
self.logger.info("Registry is configured.")
self.iut_provider = self.registry.iut_provider(suite_id)
self.log_area_provider = self.registry.log_area_provider(suite_id)
self.execution_space_provider = self.registry.execution_space_provider(suite_id)
self.etos.config.set(
"EVENT_DATA_TIMEOUT", int(os.getenv("ETOS_EVENT_DATA_TIMEOUT", "10"))
)
self.etos.config.set(
"WAIT_FOR_IUT_TIMEOUT", int(os.getenv("ETOS_WAIT_FOR_IUT_TIMEOUT", "10"))
)
self.etos.config.set(
"WAIT_FOR_EXECUTION_SPACE_TIMEOUT",
int(os.getenv("ETOS_WAIT_FOR_EXECUTION_SPACE_TIMEOUT", "10")),
)
self.etos.config.set(
"WAIT_FOR_LOG_AREA_TIMEOUT",
int(os.getenv("ETOS_WAIT_FOR_LOG_AREA_TIMEOUT", "10")),
)
self.etos.config.set("SUITE_ID", suite_id)
self.etos.config.rabbitmq_publisher_from_environment()
self.etos.start_publisher()
self.environment_provider_config = Config(self.etos, suite_id)
if not self.environment_provider_config.generated:
missing = [
name
for name, value in [
("tercc", self.environment_provider_config.tercc),
(
"artifact_created",
self.environment_provider_config.artifact_created,
),
(
"activity_triggered",
self.environment_provider_config.activity_triggered,
),
]
if value is None
]
raise NoEventDataFound(f"Missing: {', '.join(missing)}")
self.dataset.add("environment", os.environ)
self.dataset.add("config", self.etos.config)
self.dataset.add("identity", self.environment_provider_config.identity)
self.dataset.add("artifact_id", self.environment_provider_config.artifact_id)
self.dataset.add("context", self.environment_provider_config.context)
self.dataset.add("custom_data", self.environment_provider_config.custom_data)
self.dataset.add("uuid", str(uuid.uuid4()))
self.dataset.add(
"artifact_created", self.environment_provider_config.artifact_created
)
self.dataset.add(
"artifact_published", self.environment_provider_config.artifact_published
)
self.dataset.add("tercc", self.environment_provider_config.tercc)
self.dataset.merge(self.registry.dataset(suite_id))
def cleanup(self):
"""Clean up by checkin in all checked out providers."""
self.logger.info("Cleanup by checking in all checked out providers.")
for provider in self.etos.config.get("PROVIDERS"):
try:
provider.checkin_all()
except: # noqa pylint:disable=bare-except
pass
@staticmethod
def get_constraint(recipe, key):
"""Get a constraint key from an ETOS recipe.
:param recipe: Recipe to get key from.
:type recipe: dict
:param key: Key to get value from, from the constraints.
:type key: str
:return: Constraint value.
:rtype: any
"""
for constraint in recipe.get("constraints", []):
if constraint.get("key") == key:
return constraint.get("value")
return None
def create_test_suite_dict(self):
"""Create a test suite dictionary based on test runners.
I.e. If there is only one test_runner the dictionary would be::
{
"test_suite_name": {
"MyTestrunner": {
"docker": "MyTestrunner",
"priority": 1,
"unsplit_recipes": [...]
}
}
}
Or two::
{
"test_suite_name": {
"MyTestrunner": {
"docker": "MyTestrunner",
"priority": 1,
"unsplit_recipes": [...]
},
"MyOtherTestrunner": {
"docker": "MyOtherTestrunner",
"priority": 1,
"unsplit_recipes": [...]
}
}
}
etc.
:return: A test suite dictionary based on test runners.
:rtype: dict
"""
self.logger.info("Create new test suite dictionary.")
test_suites = {}
for test_suite in self.environment_provider_config.test_suite:
test_runners = test_suites.setdefault(test_suite.get("name"), {})
for recipe in test_suite.get("recipes", []):
test_runner = self.get_constraint(recipe, "TEST_RUNNER")
test_runners.setdefault(
test_runner,
{
"docker": test_runner,
"priority": test_suite.get("priority"),
"unsplit_recipes": [],
},
)
test_runners[test_runner]["unsplit_recipes"].append(recipe)
return test_suites
def set_total_test_count_and_test_runners(self, test_runners):
"""Set total test count and test runners to be used by the splitter algorithm.
:param test_runners: Dictionary with test_runners as keys.
:type test_runners: dict
"""
total_test_count = 0
for _, data in test_runners.items():
total_test_count += len(data["unsplit_recipes"])
self.etos.config.set("TOTAL_TEST_COUNT", total_test_count)
self.etos.config.set("NUMBER_OF_TESTRUNNERS", len(test_runners.keys()))
def checkout_and_assign_iuts_to_test_runners(self, test_runners):
"""Checkout IUTs from the IUT provider and assign them to the test_runners dictionary.
:param test_runners: Dictionary with test_runners as keys.
:type test_runners: dict
"""
iuts = self.iut_provider.wait_for_and_checkout_iuts(
minimum_amount=self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
maximum_amount=self.etos.config.get("TOTAL_TEST_COUNT"),
)
self.etos.config.set("NUMBER_OF_IUTS", len(iuts))
unused_iuts = self.splitter.assign_iuts(test_runners, self.dataset.get("iuts"))
for iut in unused_iuts:
self.iut_provider.checkin(iut)
def checkout_log_area(self):
"""Checkout a log area.
Called for each executor so only a single log area needs to be checked out.
"""
return self.log_area_provider.wait_for_and_checkout_log_areas(
minimum_amount=1, maximum_amount=1
)
def checkout_and_assign_executors_to_iuts(self, test_runner, iuts):
"""Checkout and assign executors to each available IUT.
:param test_runner: Test runner which will be added to dataset in order for
JSONTas to get more information when running.
:type test_runner: dict
:param iuts: Dictionary of IUTs to assign executors to.
:type iuts: dict
"""
self.dataset.add("test_runner", test_runner)
executors = (
self.execution_space_provider.wait_for_and_checkout_execution_spaces(
minimum_amount=len(iuts),
maximum_amount=len(iuts),
)
)
for iut, suite in iuts.items():
try:
suite["executor"] = executors.pop(0)
except IndexError:
break
self.dataset.add("executor", suite["executor"])
self.dataset.add("iut", iut)
# This index will always exist or 'checkout' would raise an exception.
suite["log_area"] = self.checkout_log_area()[0]
# Checkin the unassigned executors.
for executor in executors:
self.execution_space_provider.checkin(executor)
def checkin_iuts_without_executors(self, iuts):
"""Find all IUTs without an assigned executor and check them in.
:param iuts: IUTs to check for executors.
:type iuts: dict
:return: IUTs that were removed.
:rtype: list
"""
remove = []
for iut, suite in iuts.items():
if suite.get("executor") is None:
self.iut_provider.checkin(iut)
remove.append(iut)
return remove
def verify_json(self, json_data):
"""Verify that JSON data can be serialized properly.
:param json_data: JSON data to test.
:type json_data: str or dict
"""
try:
if isinstance(json_data, dict):
json_data = json.dumps(json_data)
json.loads(json_data)
except (json.decoder.JSONDecodeError, TypeError):
self.logger.error(json_data)
raise
def run(self):
"""Run the environment provider task.
:return: Test suite JSON with assigned IUTs, execution spaces and log areas.
:rtype: dict
"""
try:
self.configure(self.suite_id)
test_suites = self.create_test_suite_dict()
for test_suite_name, test_runners in test_suites.items():
self.set_total_test_count_and_test_runners(test_runners)
self.logger.info(
"Total test count : %r", self.etos.config.get("TOTAL_TEST_COUNT")
)
self.logger.info(
"Total testrunners: %r",
self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
)
self.checkout_and_assign_iuts_to_test_runners(test_runners)
for test_runner, values in test_runners.items():
self.checkout_and_assign_executors_to_iuts(
test_runner, values["iuts"]
)
for iut in self.checkin_iuts_without_executors(values["iuts"]):
values["iuts"].remove(iut)
for sub_suite in test_runners.values():
self.splitter.split(sub_suite)
test_suite = TestSuite(
test_suite_name, test_runners, self.environment_provider_config
)
# This is where the resulting test suite is generated.
# The resulting test suite will be a dictionary with test runners, IUTs
# execution spaces and log areas with tests split up over as many as
# possible. The resulting test suite definition is further explained in
# :obj:`environment_provider.lib.test_suite.TestSuite`
test_suite.generate()
test_suite_json = test_suite.to_json()
# Test that the test suite JSON is serializable so that the
# exception is caught here and not by the webserver.
# This makes sure that we can cleanup if anything breaks.
self.verify_json(test_suite_json)
# TODO: Handle multiple test suites.
return test_suite_json
except Exception as exception: # pylint:disable=broad-except
self.cleanup()
traceback.print_exc()
return {"error": str(exception), "details": traceback.format_exc()}
finally:
if self.etos.publisher is not None:
self.etos.publisher.stop()
@APP.task(name="EnvironmentProvider")
def get_environment(suite_id):
"""Get an environment for ETOS test executions.
:param suite_id: Suite ID to get an environment for
:type suite_id: str
:return: Test suite JSON with assigned IUTs, execution spaces and log areas.
:rtype: dict
"""
environment_provider = EnvironmentProvider(suite_id)
return environment_provider.run()
|
import atexit
import math
import queue
import threading
import requests
import json
import importlib
from readme_metrics import MetricsApiConfig
from readme_metrics.publisher import publish_batch
from readme_metrics.PayloadBuilder import PayloadBuilder
from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper
class Metrics:
"""
This is the internal central controller class invoked by the ReadMe middleware. It
queues requests for submission. The submission is processed by readme_metrics.publisher.publish_batch().
"""
PACKAGE_NAME: str = "readme/metrics"
def __init__(self, config: MetricsApiConfig):
"""
Constructs and initializes the ReadMe Metrics controller class with the
specified configuration.
Args:
config (MetricsApiConfig): Running configuration
"""
self.config = config
self.payload_builder = PayloadBuilder(
config.DENYLIST,
config.ALLOWLIST,
config.IS_DEVELOPMENT_MODE,
config.GROUPING_FUNCTION,
config.LOGGER,
)
self.queue = queue.Queue()
atexit.register(self.exit_handler)
def process(self, request, response: ResponseInfoWrapper) -> None:
"""Enqueues a request/response combination to be submitted the API.
Args:
request (Request): Request object from your WSGI server
response (ResponseInfoWrapper): Response object
"""
if not self.host_allowed(request.environ["HTTP_HOST"]):
self.config.LOGGER.debug(
f"Not enqueueing request, host {request.environ["HTTP_HOST"]} not in ALLOWED_HTTP_HOSTS"
)
return
payload = self.payload_builder(request, response)
if payload is None:
# PayloadBuilder returns None when the grouping function returns
# None (an indication that the request should not be logged.)
self.config.LOGGER.debug(
f"Not enqueueing request, grouping function returned None"
)
return
self.queue.put(payload)
if self.queue.qsize() >= self.config.BUFFER_LENGTH:
args = (self.config, self.queue)
if self.config.IS_BACKGROUND_MODE:
thread = threading.Thread(target=publish_batch, daemon=True, args=args)
thread.start()
else:
publish_batch(*args)
def exit_handler(self) -> None:
if not self.queue.empty():
args = (self.config, self.queue)
for _ in range(math.ceil(self.queue.qsize() / self.config.BUFFER_LENGTH)):
if self.config.IS_BACKGROUND_MODE:
thread = threading.Thread(
target=publish_batch, daemon=True, args=args
)
thread.start()
else:
publish_batch(*args)
self.queue.join()
def host_allowed(self, host):
if self.config.ALLOWED_HTTP_HOSTS:
return host in self.config.ALLOWED_HTTP_HOSTS
else:
# If the allowed_http_hosts has not been set (None by default), send off the data to be queued
return True
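# Minimal wiring sketch (hypothetical; the request object and response
# wrapper come from whatever WSGI middleware hosts this controller):
#
#     metrics = Metrics(config)                  # config: MetricsApiConfig
#     metrics.process(request, response_info)    # response_info: ResponseInfoWrapper
#
# Payloads accumulate on an internal queue and are flushed via
# readme_metrics.publisher.publish_batch once BUFFER_LENGTH is reached.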
| import atexit
import math
import queue
import threading
import requests
import json
import importlib
from readme_metrics import MetricsApiConfig
from readme_metrics.publisher import publish_batch
from readme_metrics.PayloadBuilder import PayloadBuilder
from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper
class Metrics:
"""
This is the internal central controller class invoked by the ReadMe middleware. It
queues requests for submission. The submission is processed by readme_metrics.publisher.publish_batch().
"""
PACKAGE_NAME: str = "readme/metrics"
def __init__(self, config: MetricsApiConfig):
"""
Constructs and initializes the ReadMe Metrics controller class with the
specified configuration.
Args:
config (MetricsApiConfig): Running configuration
"""
self.config = config
self.payload_builder = PayloadBuilder(
config.DENYLIST,
config.ALLOWLIST,
config.IS_DEVELOPMENT_MODE,
config.GROUPING_FUNCTION,
config.LOGGER,
)
self.queue = queue.Queue()
atexit.register(self.exit_handler)
def process(self, request, response: ResponseInfoWrapper) -> None:
"""Enqueues a request/response combination to be submitted the API.
Args:
request (Request): Request object from your WSGI server
response (ResponseInfoWrapper): Response object
"""
if not self.host_allowed(request.environ["HTTP_HOST"]):
self.config.LOGGER.debug(
f"Not enqueueing request, host {request.environ['HTTP_HOST']} not in ALLOWED_HTTP_HOSTS"
)
return
payload = self.payload_builder(request, response)
if payload is None:
# PayloadBuilder returns None when the grouping function returns
# None (an indication that the request should not be logged.)
self.config.LOGGER.debug(
f"Not enqueueing request, grouping function returned None"
)
return
self.queue.put(payload)
if self.queue.qsize() >= self.config.BUFFER_LENGTH:
args = (self.config, self.queue)
if self.config.IS_BACKGROUND_MODE:
thread = threading.Thread(target=publish_batch, daemon=True, args=args)
thread.start()
else:
publish_batch(*args)
def exit_handler(self) -> None:
if not self.queue.empty():
args = (self.config, self.queue)
for _ in range(math.ceil(self.queue.qsize() / self.config.BUFFER_LENGTH)):
if self.config.IS_BACKGROUND_MODE:
thread = threading.Thread(
target=publish_batch, daemon=True, args=args
)
thread.start()
else:
publish_batch(*args)
self.queue.join()
def host_allowed(self, host):
if self.config.ALLOWED_HTTP_HOSTS:
return host in self.config.ALLOWED_HTTP_HOSTS
else:
# If the allowed_http_hosts has not been set (None by default), send off the data to be queued
return True
|
'''
Add noise to audio files in TIMIT.
'''
import os
import glob
import random
from tqdm import tqdm
from create_mixed_audio_file_with_soundfile import mix_clean_with_noise
if __name__ == '__main__':
snr = 0 # in dB
timit_root = '/Users/goree/Desktop/cmu/datasets/timit/data' # change for user path structure
output_root = f'/Users/goree/Desktop/cmu/datasets/timit_snr{snr}' # change for user path structure
# change for user path structure
noise_files = [
'/Users/goree/Desktop/cmu/datasets/noise/white_noise.wav'
]
wav_files = glob.glob(os.path.join(timit_root, '*', '*', '*', '*.WAV.wav'))
for cur_wav in tqdm(wav_files):
# cur_dir = '/'.join([output_root] + os.path.dirname(cur_wav).split('/')[-3:])
cur_dir = os.path.join(output_root, *os.path.dirname(cur_wav).split('\\')[-3:]) # windows usage
cur_id = os.path.basename(cur_wav)[:-8]
os.makedirs(cur_dir, exist_ok=True)
mix_clean_with_noise(
cur_wav,
random.choice(noise_files),
snr,
os.path.join(cur_dir, f"{cur_id}.wav")
)
# this cp_cmd is for windows usage. Change 'copy' to 'cp' for unix systems
# also remove string quotes around source dest files in unix
        cp_cmd = f'copy "{os.path.join(os.path.dirname(cur_wav), f"{cur_id}.TXT")}" "{os.path.join(cur_dir, f"{cur_id}.txt")}" >NUL'
os.system(cp_cmd)
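# For reference, the usual additive-noise convention at a target SNR (what
# mix_clean_with_noise is assumed to implement; its source is not shown
# here): scale the noise so that 10*log10(P_clean / P_noise_scaled) == snr,
# i.e. noise_scaled = noise * sqrt(P_clean / (P_noise * 10 ** (snr / 10))),
# then add it sample-wise to the clean signal. snr = 0 dB means equal power.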
| '''
Add noise to audio files in TIMIT.
'''
import os
import glob
import random
from tqdm import tqdm
from create_mixed_audio_file_with_soundfile import mix_clean_with_noise
if __name__ == '__main__':
snr = 0 # in dB
timit_root = '/Users/goree/Desktop/cmu/datasets/timit/data' # change for user path structure
output_root = f'/Users/goree/Desktop/cmu/datasets/timit_snr{snr}' # change for user path structure
# change for user path structure
noise_files = [
'/Users/goree/Desktop/cmu/datasets/noise/white_noise.wav'
]
wav_files = glob.glob(os.path.join(timit_root, '*', '*', '*', '*.WAV.wav'))
for cur_wav in tqdm(wav_files):
# cur_dir = '/'.join([output_root] + os.path.dirname(cur_wav).split('/')[-3:])
cur_dir = os.path.join(output_root, *os.path.dirname(cur_wav).split('\\')[-3:]) # windows usage
cur_id = os.path.basename(cur_wav)[:-8]
os.makedirs(cur_dir, exist_ok=True)
mix_clean_with_noise(
cur_wav,
random.choice(noise_files),
snr,
os.path.join(cur_dir, f"{cur_id}.wav")
)
# this cp_cmd is for windows usage. Change 'copy' to 'cp' for unix systems
# also remove string quotes around source dest files in unix
cp_cmd = f'copy "{os.path.join(os.path.dirname(cur_wav), f"{cur_id}.TXT")}" "{os.path.join(cur_dir, f"{cur_id}.txt")}" >NUL'
os.system(cp_cmd)
|
import pytz
import logging
import datetime
from typing import List
from pydantic import BaseModel
from sqlalchemy import func
from dispatch.nlp import build_phrase_matcher, build_term_vocab, extract_terms_from_text
from dispatch.conversation import service as conversation_service
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident import service as incident_service
from dispatch.plugins.dispatch_slack.config import (
SlackConversationConfiguration,
)
from dispatch.tag import service as tag_service
from dispatch.individual import service as individual_service
from dispatch.participant import service as participant_service
from dispatch.participant_role.models import ParticipantRoleType
from dispatch.plugins.dispatch_slack import service as dispatch_slack_service
from dispatch.plugin import service as plugin_service
from dispatch.monitor import service as monitor_service
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.tag.models import Tag
from .decorators import slack_background_task, get_organization_scope_from_channel_id
from .service import get_user_email
from .models import MonitorButton
log = logging.getLogger(__name__)
class EventBodyItem(BaseModel):
"""Body item of the Slack event."""
type: str = None
channel: str = None
ts: str = None
class EventBody(BaseModel):
"""Body of the Slack event."""
channel: str = None
channel_id: str = None
channel_type: str = None
deleted_ts: str = None
event_ts: str = None
thread_ts: str = None
file_id: str = None
hidden: bool = None
inviter: str = None
item: EventBodyItem = None
item_user: str = None
reaction: str = None
subtype: str = None
team: str = None
text: str = None
type: str
user: str = None
user_id: str = None
class EventEnvelope(BaseModel):
"""Envelope of the Slack event."""
api_app_id: str = None
authed_users: List[str] = []
challenge: str = None
enterprise_id: str = None
event: EventBody = None
event_id: str = None
event_time: int = None
team_id: str = None
token: str = None
type: str
def get_channel_id_from_event(event: EventEnvelope):
"""Returns the channel id from the Slack event."""
channel_id = ""
if event.event.channel_id:
return event.event.channel_id
if event.event.channel:
return event.event.channel
    if event.event.item and event.event.item.channel:
        return event.event.item.channel
return channel_id
def event_functions(event: EventEnvelope):
"""Interprets the events and routes it the appropriate function."""
event_mappings = {
"member_joined_channel": [member_joined_channel],
"member_left_channel": [member_left_channel],
"message": [after_hours, ban_threads_warning, message_tagging, message_monitor],
"message.groups": [],
"message.im": [],
"reaction_added": [handle_reaction_added_event],
}
return event_mappings.get(event.event.type, [])
async def handle_slack_event(*, config, client, event, background_tasks):
"""Handles slack event message."""
user_id = event.event.user
channel_id = get_channel_id_from_event(event)
if user_id and channel_id:
db_session = get_organization_scope_from_channel_id(channel_id=channel_id)
if not db_session:
log.info(
f"Unable to determine organization associated with channel id. ChannelId: {channel_id}"
)
return {"ok": ""}
conversation = conversation_service.get_by_channel_id_ignoring_channel_type(
db_session=db_session, channel_id=channel_id
)
if conversation and dispatch_slack_service.is_user(config, user_id):
# We resolve the user's email
user_email = await dispatch_slack_service.get_user_email_async(client, user_id)
# increment activity for user
participant = participant_service.get_by_incident_id_and_email(
db_session=db_session, incident_id=conversation.incident_id, email=user_email
)
if participant.activity:
participant.activity += 1
else:
participant.activity = 1
db_session.commit()
# Dispatch event functions to be executed in the background
for f in event_functions(event):
background_tasks.add_task(
f,
config=config,
user_id=user_id,
user_email=user_email,
channel_id=channel_id,
incident_id=conversation.incident_id,
event=event,
)
return {"ok": ""}
@slack_background_task
def handle_reaction_added_event(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Handles an event where a reaction is added to a message."""
reaction = event.event.reaction
if reaction == config.timeline_event_reaction:
conversation_id = event.event.item.channel
message_ts = event.event.item.ts
message_ts_utc = datetime.datetime.utcfromtimestamp(float(message_ts))
# we fetch the message information
response = dispatch_slack_service.list_conversation_messages(
slack_client, conversation_id, latest=message_ts, limit=1, inclusive=1
)
message_text = response["messages"][0]["text"]
message_sender_id = response["messages"][0]["user"]
# we fetch the incident
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
# we fetch the individual who sent the message
message_sender_email = get_user_email(client=slack_client, user_id=message_sender_id)
individual = individual_service.get_by_email_and_project(
db_session=db_session, email=message_sender_email, project_id=incident.project.id
)
# we log the event
event_service.log(
db_session=db_session,
source="Slack Plugin - Conversation Management",
description=f'"{message_text}," said {individual.name}',
incident_id=incident_id,
individual_id=individual.id,
started_at=message_ts_utc,
)
def is_business_hours(commander_tz: str):
"""Determines if it's currently office hours where the incident commander is located."""
now = datetime.datetime.now(pytz.timezone(commander_tz))
return now.weekday() not in [5, 6] and 9 <= now.hour < 17
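# datetime.weekday() counts Monday as 0 through Sunday as 6, so [5, 6]
# excludes Saturday/Sunday; e.g. a Tuesday at 10:00 in the commander's
# timezone is business hours, a Saturday at 10:00 is not.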
@slack_background_task
def after_hours(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Notifies the user that this incident is current in after hours mode."""
# we ignore user channel and group join messages
if event.event.subtype in ["channel_join", "group_join"]:
return
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
# get their timezone from slack
commander_info = dispatch_slack_service.get_user_info_by_email(
slack_client, email=incident.commander.individual.email
)
commander_tz = commander_info["tz"]
if not is_business_hours(commander_tz):
        # send ephemeral message
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": (
(
f"Responses may be delayed. The current incident priority is *{incident.incident_priority.name}*"
f" and your message was sent outside of the Incident Commander's working hours (Weekdays, 9am-5pm, {commander_tz} timezone)."
)
),
},
}
]
participant = participant_service.get_by_incident_id_and_email(
db_session=db_session, incident_id=incident_id, email=user_email
)
if not participant.after_hours_notification:
dispatch_slack_service.send_ephemeral_message(
slack_client, channel_id, user_id, "", blocks=blocks
)
participant.after_hours_notification = True
db_session.add(participant)
db_session.commit()
@slack_background_task
def member_joined_channel(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope,
db_session=None,
slack_client=None,
):
"""Handles the member_joined_channel slack event."""
participant = incident_flows.incident_add_or_reactivate_participant_flow(
user_email=user_email, incident_id=incident_id, db_session=db_session
)
if event.event.inviter:
# we update the participant's metadata
if not dispatch_slack_service.is_user(config, event.event.inviter):
# we default to the incident commander when we don't know how the user was added
added_by_participant = participant_service.get_by_incident_id_and_role(
db_session=db_session,
incident_id=incident_id,
role=ParticipantRoleType.incident_commander,
)
participant.added_by = added_by_participant
participant.added_reason = (
f"Participant added by {added_by_participant.individual.name}"
)
else:
inviter_email = get_user_email(client=slack_client, user_id=event.event.inviter)
added_by_participant = participant_service.get_by_incident_id_and_email(
db_session=db_session, incident_id=incident_id, email=inviter_email
)
participant.added_by = added_by_participant
participant.added_reason = event.event.text
db_session.add(participant)
db_session.commit()
@slack_background_task
def member_left_channel(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope,
db_session=None,
slack_client=None,
):
"""Handles the member_left_channel Slack event."""
incident_flows.incident_remove_participant_flow(user_email, incident_id, db_session=db_session)
@slack_background_task
def ban_threads_warning(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Sends the user an ephemeral message if they use threads."""
if not config.ban_threads:
return
if event.event.thread_ts:
# we should be able to look for `subtype == message_replied` once this bug is fixed
# https://api.slack.com/events/message/message_replied
# From Slack: Bug alert! This event is missing the subtype field when dispatched
# over the Events API. Until it is fixed, examine message events' thread_ts value.
# When present, it's a reply. To be doubly sure, compare a thread_ts to the top-level ts
# value, when they differ the latter is a reply to the former.
message = "Please refrain from using threads in incident related channels. Threads make it harder for incident participants to maintain context."
dispatch_slack_service.send_ephemeral_message(
slack_client,
channel_id,
user_id,
message,
thread_ts=event.event.thread_ts,
)
@slack_background_task
def message_tagging(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Looks for incident tags in incident messages."""
text = event.event.text
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
tags = tag_service.get_all(db_session=db_session, project_id=incident.project.id).all()
tag_strings = [t.name.lower() for t in tags if t.discoverable]
phrases = build_term_vocab(tag_strings)
matcher = build_phrase_matcher("dispatch-tag", phrases)
extracted_tags = list(set(extract_terms_from_text(text, matcher)))
matched_tags = (
db_session.query(Tag)
.filter(func.upper(Tag.name).in_([func.upper(t) for t in extracted_tags]))
.all()
)
incident.tags.extend(matched_tags)
db_session.commit()
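# Flow sketch for the tagging above: discoverable tag names are lower-cased
# into a phrase-matcher vocabulary, terms extracted from the message text are
# de-duplicated, matched case-insensitively against Tag.name in the database,
# and any hits are appended to incident.tags before committing.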
@slack_background_task
def message_monitor(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Looks strings that are available for monitoring (usually links)."""
text = event.event.text
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
plugins = plugin_service.get_active_instances(
db_session=db_session, project_id=incident.project.id, plugin_type="monitor"
)
for p in plugins:
for matcher in p.instance.get_matchers():
for match in matcher.finditer(text):
match_data = match.groupdict()
monitor = monitor_service.get_by_weblink(
db_session=db_session, weblink=match_data["weblink"]
)
# silence ignored matches
if monitor:
continue
current_status = p.instance.get_match_status(match_data)
if current_status:
status_text = ""
for k, v in current_status.items():
status_text += f"*{k.title()}*:\n{v.title()}\n"
monitor_button = MonitorButton(
incident_id=incident.id,
plugin_instance_id=p.id,
organization=incident.project.organization.slug,
weblink=match_data["weblink"],
action_type="monitor",
)
ignore_button = MonitorButton(
incident_id=incident.id,
plugin_instance_id=p.id,
organization=incident.project.organization.slug,
weblink=match_data["weblink"],
action_type="ignore",
)
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"Hi! Dispatch is able to help track the status of: \n {match_data["weblink"]} \n\n Would you like for changes in it's status to be propagated to this incident channel?",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": status_text,
},
},
{
"type": "actions",
"block_id": f"{ConversationButtonActions.monitor_link}",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"emoji": True,
"text": "Monitor",
},
"style": "primary",
"value": monitor_button.json(),
},
{
"type": "button",
"text": {"type": "plain_text", "emoji": True, "text": "Ignore"},
"style": "danger",
"value": ignore_button.json(),
},
],
},
]
dispatch_slack_service.send_ephemeral_message(
slack_client, channel_id, user_id, "", blocks=blocks
)
| import pytz
import logging
import datetime
from typing import List
from pydantic import BaseModel
from sqlalchemy import func
from sqlalchemy.sql.functions import user
from dispatch.nlp import build_phrase_matcher, build_term_vocab, extract_terms_from_text
from dispatch.conversation import service as conversation_service
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident import service as incident_service
from dispatch.plugins.dispatch_slack.config import (
SlackConversationConfiguration,
)
from dispatch.tag import service as tag_service
from dispatch.individual import service as individual_service
from dispatch.participant import service as participant_service
from dispatch.participant_role.models import ParticipantRoleType
from dispatch.plugins.dispatch_slack import service as dispatch_slack_service
from dispatch.plugin import service as plugin_service
from dispatch.monitor import service as monitor_service
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.tag.models import Tag
from .decorators import slack_background_task, get_organization_scope_from_channel_id
from .service import get_user_email
from .models import MonitorButton
log = logging.getLogger(__name__)
class EventBodyItem(BaseModel):
"""Body item of the Slack event."""
type: str = None
channel: str = None
ts: str = None
class EventBody(BaseModel):
"""Body of the Slack event."""
channel: str = None
channel_id: str = None
channel_type: str = None
deleted_ts: str = None
event_ts: str = None
thread_ts: str = None
file_id: str = None
hidden: bool = None
inviter: str = None
item: EventBodyItem = None
item_user: str = None
reaction: str = None
subtype: str = None
team: str = None
text: str = None
type: str
user: str = None
user_id: str = None
class EventEnvelope(BaseModel):
"""Envelope of the Slack event."""
api_app_id: str = None
authed_users: List[str] = []
challenge: str = None
enterprise_id: str = None
event: EventBody = None
event_id: str = None
event_time: int = None
team_id: str = None
token: str = None
type: str
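# Hedged usage sketch: incoming webhook payloads can be validated into these
# models with pydantic (v1-style parse_obj assumed, matching the BaseModel
# usage above). The payload values below are invented for illustration.
#
#   envelope = EventEnvelope.parse_obj(
#       {"type": "event_callback", "event": {"type": "message", "channel": "C123", "text": "hi"}}
#   )
#   envelope.event.channel  # -> "C123"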
def get_channel_id_from_event(event: EventEnvelope):
"""Returns the channel id from the Slack event."""
channel_id = ""
if event.event.channel_id:
return event.event.channel_id
if event.event.channel:
return event.event.channel
if event.event.item and event.event.item.channel:
return event.event.item.channel
return channel_id
def event_functions(event: EventEnvelope):
"""Interprets the events and routes it the appropriate function."""
event_mappings = {
"member_joined_channel": [member_joined_channel],
"member_left_channel": [member_left_channel],
"message": [after_hours, ban_threads_warning, message_tagging, message_monitor],
"message.groups": [],
"message.im": [],
"reaction_added": [handle_reaction_added_event],
}
return event_mappings.get(event.event.type, [])
async def handle_slack_event(*, config, client, event, background_tasks):
"""Handles slack event message."""
user_id = event.event.user
channel_id = get_channel_id_from_event(event)
if user_id and channel_id:
db_session = get_organization_scope_from_channel_id(channel_id=channel_id)
if not db_session:
log.info(
f"Unable to determine organization associated with channel id. ChannelId: {channel_id}"
)
return {"ok": ""}
conversation = conversation_service.get_by_channel_id_ignoring_channel_type(
db_session=db_session, channel_id=channel_id
)
if conversation and dispatch_slack_service.is_user(config, user_id):
# We resolve the user's email
user_email = await dispatch_slack_service.get_user_email_async(client, user_id)
# increment activity for user
participant = participant_service.get_by_incident_id_and_email(
db_session=db_session, incident_id=conversation.incident_id, email=user_email
)
if participant.activity:
participant.activity += 1
else:
participant.activity = 1
db_session.commit()
# Dispatch event functions to be executed in the background
for f in event_functions(event):
background_tasks.add_task(
f,
config=config,
user_id=user_id,
user_email=user_email,
channel_id=channel_id,
incident_id=conversation.incident_id,
event=event,
)
return {"ok": ""}
@slack_background_task
def handle_reaction_added_event(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Handles an event where a reaction is added to a message."""
reaction = event.event.reaction
if reaction == config.timeline_event_reaction:
conversation_id = event.event.item.channel
message_ts = event.event.item.ts
message_ts_utc = datetime.datetime.utcfromtimestamp(float(message_ts))
# we fetch the message information
response = dispatch_slack_service.list_conversation_messages(
slack_client, conversation_id, latest=message_ts, limit=1, inclusive=1
)
message_text = response["messages"][0]["text"]
message_sender_id = response["messages"][0]["user"]
# we fetch the incident
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
# we fetch the individual who sent the message
message_sender_email = get_user_email(client=slack_client, user_id=message_sender_id)
individual = individual_service.get_by_email_and_project(
db_session=db_session, email=message_sender_email, project_id=incident.project.id
)
# we log the event
event_service.log(
db_session=db_session,
source="Slack Plugin - Conversation Management",
description=f'"{message_text}," said {individual.name}',
incident_id=incident_id,
individual_id=individual.id,
started_at=message_ts_utc,
)
def is_business_hours(commander_tz: str):
"""Determines if it's currently office hours where the incident commander is located."""
now = datetime.datetime.now(pytz.timezone(commander_tz))
return now.weekday() not in [5, 6] and 9 <= now.hour < 17
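# Quick illustration of the rule above (values assumed): with the commander in
# "America/New_York", a Tuesday 10:00 local-time message yields True, while a
# Saturday or a weekday 18:00 message yields False, since only weekday hours
# 9-17 in the commander's timezone count as business hours.
#
#   is_business_hours("America/New_York")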
@slack_background_task
def after_hours(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Notifies the user that this incident is current in after hours mode."""
# we ignore user channel and group join messages
if event.event.subtype in ["channel_join", "group_join"]:
return
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
# get their timezone from slack
commander_info = dispatch_slack_service.get_user_info_by_email(
slack_client, email=incident.commander.individual.email
)
commander_tz = commander_info["tz"]
if not is_business_hours(commander_tz):
# send ephemeral message
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": (
(
f"Responses may be delayed. The current incident priority is *{incident.incident_priority.name}*"
f" and your message was sent outside of the Incident Commander's working hours (Weekdays, 9am-5pm, {commander_tz} timezone)."
)
),
},
}
]
participant = participant_service.get_by_incident_id_and_email(
db_session=db_session, incident_id=incident_id, email=user_email
)
if not participant.after_hours_notification:
dispatch_slack_service.send_ephemeral_message(
slack_client, channel_id, user_id, "", blocks=blocks
)
participant.after_hours_notification = True
db_session.add(participant)
db_session.commit()
@slack_background_task
def member_joined_channel(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope,
db_session=None,
slack_client=None,
):
"""Handles the member_joined_channel slack event."""
participant = incident_flows.incident_add_or_reactivate_participant_flow(
user_email=user_email, incident_id=incident_id, db_session=db_session
)
if event.event.inviter:
# we update the participant's metadata
if not dispatch_slack_service.is_user(config, event.event.inviter):
# we default to the incident commander when we don't know how the user was added
added_by_participant = participant_service.get_by_incident_id_and_role(
db_session=db_session,
incident_id=incident_id,
role=ParticipantRoleType.incident_commander,
)
participant.added_by = added_by_participant
participant.added_reason = (
f"Participant added by {added_by_participant.individual.name}"
)
else:
inviter_email = get_user_email(client=slack_client, user_id=event.event.inviter)
added_by_participant = participant_service.get_by_incident_id_and_email(
db_session=db_session, incident_id=incident_id, email=inviter_email
)
participant.added_by = added_by_participant
participant.added_reason = event.event.text
db_session.add(participant)
db_session.commit()
@slack_background_task
def member_left_channel(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope,
db_session=None,
slack_client=None,
):
"""Handles the member_left_channel Slack event."""
incident_flows.incident_remove_participant_flow(user_email, incident_id, db_session=db_session)
@slack_background_task
def ban_threads_warning(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Sends the user an ephemeral message if they use threads."""
if not config.ban_threads:
return
if event.event.thread_ts:
# we should be able to look for `subtype == message_replied` once this bug is fixed
# https://api.slack.com/events/message/message_replied
# From Slack: Bug alert! This event is missing the subtype field when dispatched
# over the Events API. Until it is fixed, examine message events' thread_ts value.
# When present, it's a reply. To be doubly sure, compare a thread_ts to the top-level ts
# value, when they differ the latter is a reply to the former.
message = "Please refrain from using threads in incident related channels. Threads make it harder for incident participants to maintain context."
dispatch_slack_service.send_ephemeral_message(
slack_client,
channel_id,
user_id,
message,
thread_ts=event.event.thread_ts,
)
@slack_background_task
def message_tagging(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Looks for incident tags in incident messages."""
text = event.event.text
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
tags = tag_service.get_all(db_session=db_session, project_id=incident.project.id).all()
tag_strings = [t.name.lower() for t in tags if t.discoverable]
phrases = build_term_vocab(tag_strings)
matcher = build_phrase_matcher("dispatch-tag", phrases)
extracted_tags = list(set(extract_terms_from_text(text, matcher)))
matched_tags = (
db_session.query(Tag)
.filter(func.upper(Tag.name).in_([func.upper(t) for t in extracted_tags]))
.all()
)
incident.tags.extend(matched_tags)
db_session.commit()
@slack_background_task
def message_monitor(
config: SlackConversationConfiguration,
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
event: EventEnvelope = None,
db_session=None,
slack_client=None,
):
"""Looks strings that are available for monitoring (usually links)."""
text = event.event.text
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
plugins = plugin_service.get_active_instances(
db_session=db_session, project_id=incident.project.id, plugin_type="monitor"
)
for p in plugins:
for matcher in p.instance.get_matchers():
for match in matcher.finditer(text):
match_data = match.groupdict()
monitor = monitor_service.get_by_weblink(
db_session=db_session, weblink=match_data["weblink"]
)
# silence ignored matches
if monitor:
continue
current_status = p.instance.get_match_status(match_data)
if current_status:
status_text = ""
for k, v in current_status.items():
status_text += f"*{k.title()}*:\n{v.title()}\n"
monitor_button = MonitorButton(
incident_id=incident.id,
plugin_instance_id=p.id,
organization=incident.project.organization.slug,
weblink=match_data["weblink"],
action_type="monitor",
)
ignore_button = MonitorButton(
incident_id=incident.id,
plugin_instance_id=p.id,
organization=incident.project.organization.slug,
weblink=match_data["weblink"],
action_type="ignore",
)
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"Hi! Dispatch is able to help track the status of: \n {match_data['weblink']} \n\n Would you like for changes in it's status to be propagated to this incident channel?",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": status_text,
},
},
{
"type": "actions",
"block_id": f"{ConversationButtonActions.monitor_link}",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"emoji": True,
"text": "Monitor",
},
"style": "primary",
"value": monitor_button.json(),
},
{
"type": "button",
"text": {"type": "plain_text", "emoji": True, "text": "Ignore"},
"style": "danger",
"value": ignore_button.json(),
},
],
},
]
dispatch_slack_service.send_ephemeral_message(
slack_client, channel_id, user_id, "", blocks=blocks
)
|
import asyncio
import json
import logging
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import aiohttp
from dogechia.server.outbound_message import NodeType
from dogechia.server.server import ssl_context_for_server
from dogechia.types.peer_info import PeerInfo
from dogechia.util.byte_types import hexstr_to_bytes
from dogechia.util.ints import uint16
from dogechia.util.json_util import dict_to_json_str, obj_to_response
from dogechia.util.ws_message import create_payload, create_payload_dict, format_response, pong
log = logging.getLogger(__name__)
class RpcServer:
"""
Implementation of RPC server.
"""
def __init__(self, rpc_api: Any, service_name: str, stop_cb: Callable, root_path, net_config):
self.rpc_api = rpc_api
self.stop_cb: Callable = stop_cb
self.log = log
self.shut_down = False
self.websocket: Optional[aiohttp.ClientWebSocketResponse] = None
self.service_name = service_name
self.root_path = root_path
self.net_config = net_config
self.crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
self.key_path = root_path / net_config["daemon_ssl"]["private_key"]
self.ca_cert_path = root_path / net_config["private_ssl_ca"]["crt"]
self.ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
self.ssl_context = ssl_context_for_server(self.ca_cert_path, self.ca_key_path, self.crt_path, self.key_path)
async def stop(self):
self.shut_down = True
if self.websocket is not None:
await self.websocket.close()
async def _state_changed(self, *args):
if self.websocket is None:
return None
payloads: List[Dict] = await self.rpc_api._state_changed(*args)
change = args[0]
if change == "add_connection" or change == "close_connection" or change == "peer_changed_peak":
data = await self.get_connections({})
if data is not None:
payload = create_payload_dict(
"get_connections",
data,
self.service_name,
"wallet_ui",
)
payloads.append(payload)
for payload in payloads:
if "success" not in payload["data"]:
payload["data"]["success"] = True
try:
await self.websocket.send_str(dict_to_json_str(payload))
except Exception:
tb = traceback.format_exc()
self.log.warning(f"Sending data failed. Exception {tb}.")
def state_changed(self, *args):
if self.websocket is None:
return None
asyncio.create_task(self._state_changed(*args))
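# Illustrative shape of the payloads pushed above (field names assumed from
# create_payload_dict, not captured from a live daemon):
#   {"command": "get_connections", "data": {...}, "origin": "<service_name>",
#    "destination": "wallet_ui", "request_id": "...", "ack": False}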
def _wrap_http_handler(self, f) -> Callable:
async def inner(request) -> aiohttp.web.Response:
request_data = await request.json()
try:
res_object = await f(request_data)
if res_object is None:
res_object = {}
if "success" not in res_object:
res_object["success"] = True
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Error while handling message: {tb}")
if len(e.args) > 0:
res_object = {"success": False, "error": f"{e.args[0]}"}
else:
res_object = {"success": False, "error": f"{e}"}
return obj_to_response(res_object)
return inner
async def get_connections(self, request: Dict) -> Dict:
if self.rpc_api.service.server is None:
raise ValueError("Global connections is not set")
if self.rpc_api.service.server._local_type is NodeType.FULL_NODE:
# TODO add peaks for peers
connections = self.rpc_api.service.server.get_connections()
con_info = []
if self.rpc_api.service.sync_store is not None:
peak_store = self.rpc_api.service.sync_store.peer_to_peak
else:
peak_store = None
for con in connections:
if peak_store is not None and con.peer_node_id in peak_store:
peak_hash, peak_height, peak_weight = peak_store[con.peer_node_id]
else:
peak_height = None
peak_hash = None
peak_weight = None
con_dict = {
"type": con.connection_type,
"local_port": con.local_port,
"peer_host": con.peer_host,
"peer_port": con.peer_port,
"peer_server_port": con.peer_server_port,
"node_id": con.peer_node_id,
"creation_time": con.creation_time,
"bytes_read": con.bytes_read,
"bytes_written": con.bytes_written,
"last_message_time": con.last_message_time,
"peak_height": peak_height,
"peak_weight": peak_weight,
"peak_hash": peak_hash,
}
con_info.append(con_dict)
else:
connections = self.rpc_api.service.server.get_connections()
con_info = [
{
"type": con.connection_type,
"local_port": con.local_port,
"peer_host": con.peer_host,
"peer_port": con.peer_port,
"peer_server_port": con.peer_server_port,
"node_id": con.peer_node_id,
"creation_time": con.creation_time,
"bytes_read": con.bytes_read,
"bytes_written": con.bytes_written,
"last_message_time": con.last_message_time,
}
for con in connections
]
return {"connections": con_info}
async def open_connection(self, request: Dict):
host = request["host"]
port = request["port"]
target_node: PeerInfo = PeerInfo(host, uint16(int(port)))
on_connect = None
if hasattr(self.rpc_api.service, "on_connect"):
on_connect = self.rpc_api.service.on_connect
if getattr(self.rpc_api.service, "server", None) is None or not (
await self.rpc_api.service.server.start_client(target_node, on_connect)
):
raise ValueError("Start client failed, or server is not set")
return {}
async def close_connection(self, request: Dict):
node_id = hexstr_to_bytes(request["node_id"])
if self.rpc_api.service.server is None:
raise aiohttp.web.HTTPInternalServerError()
connections_to_close = [c for c in self.rpc_api.service.server.get_connections() if c.peer_node_id == node_id]
if len(connections_to_close) == 0:
raise ValueError(f"Connection with node_id {node_id.hex()} does not exist")
for connection in connections_to_close:
await connection.close()
return {}
async def stop_node(self, request):
"""
Shuts down the node.
"""
if self.stop_cb is not None:
self.stop_cb()
return {}
async def ws_api(self, message):
"""
This function gets called when a new message is received via the websocket.
"""
command = message["command"]
if message["ack"]:
return None
data = None
if "data" in message:
data = message["data"]
if command == "ping":
return pong()
f = getattr(self, command, None)
if f is not None:
return await f(data)
f = getattr(self.rpc_api, command, None)
if f is not None:
return await f(data)
raise ValueError(f"unknown_command {command}")
async def safe_handle(self, websocket, payload):
message = None
try:
message = json.loads(payload)
self.log.debug(f"Rpc call <- {message["command"]}")
response = await self.ws_api(message)
# Only respond if we return something from api call
if response is not None:
log.debug(f"Rpc response -> {message["command"]}")
# Set success to true automatically (unless it's already set)
if "success" not in response:
response["success"] = True
await websocket.send_str(format_response(message, response))
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Error while handling message: {tb}")
if message is not None:
error = e.args[0] if e.args else e
res = {"success": False, "error": f"{error}"}
await websocket.send_str(format_response(message, res))
async def connection(self, ws):
data = {"service": self.service_name}
payload = create_payload("register_service", data, self.service_name, "daemon")
await ws.send_str(payload)
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
message = msg.data.strip()
# self.log.info(f"received message: {message}")
await self.safe_handle(ws, message)
elif msg.type == aiohttp.WSMsgType.BINARY:
self.log.debug("Received binary data")
elif msg.type == aiohttp.WSMsgType.PING:
self.log.debug("Ping received")
await ws.pong()
elif msg.type == aiohttp.WSMsgType.PONG:
self.log.debug("Pong received")
else:
if msg.type == aiohttp.WSMsgType.CLOSE:
self.log.debug("Closing RPC websocket")
await ws.close()
elif msg.type == aiohttp.WSMsgType.ERROR:
self.log.error("Error during receive %s" % ws.exception())
elif msg.type == aiohttp.WSMsgType.CLOSED:
pass
break
await ws.close()
async def connect_to_daemon(self, self_hostname: str, daemon_port: uint16):
while True:
try:
if self.shut_down:
break
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f"wss://{self_hostname}:{daemon_port}",
autoclose=True,
autoping=True,
heartbeat=60,
ssl_context=self.ssl_context,
max_msg_size=100 * 1024 * 1024,
) as ws:
self.websocket = ws
await self.connection(ws)
self.websocket = None
except aiohttp.ClientConnectorError:
self.log.warning(f"Cannot connect to daemon at ws://{self_hostname}:{daemon_port}")
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Exception: {tb} {type(e)}")
await asyncio.sleep(2)
async def start_rpc_server(
rpc_api: Any,
self_hostname: str,
daemon_port: uint16,
rpc_port: uint16,
stop_cb: Callable,
root_path: Path,
net_config,
connect_to_daemon=True,
):
"""
Starts an HTTP server with the following RPC methods, to be used by local clients to
query the node.
"""
app = aiohttp.web.Application()
rpc_server = RpcServer(rpc_api, rpc_api.service_name, stop_cb, root_path, net_config)
rpc_server.rpc_api.service._set_state_changed_callback(rpc_server.state_changed)
http_routes: Dict[str, Callable] = rpc_api.get_routes()
routes = [aiohttp.web.post(route, rpc_server._wrap_http_handler(func)) for (route, func) in http_routes.items()]
routes += [
aiohttp.web.post(
"/get_connections",
rpc_server._wrap_http_handler(rpc_server.get_connections),
),
aiohttp.web.post(
"/open_connection",
rpc_server._wrap_http_handler(rpc_server.open_connection),
),
aiohttp.web.post(
"/close_connection",
rpc_server._wrap_http_handler(rpc_server.close_connection),
),
aiohttp.web.post("/stop_node", rpc_server._wrap_http_handler(rpc_server.stop_node)),
]
app.add_routes(routes)
if connect_to_daemon:
daemon_connection = asyncio.create_task(rpc_server.connect_to_daemon(self_hostname, daemon_port))
runner = aiohttp.web.AppRunner(app, access_log=None)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self_hostname, int(rpc_port), ssl_context=rpc_server.ssl_context)
await site.start()
async def cleanup():
await rpc_server.stop()
await runner.cleanup()
if connect_to_daemon:
await daemon_connection
return cleanup
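# Hedged usage sketch for start_rpc_server. RpcApiImpl, the service object, and
# the port numbers are placeholders; any rpc_api exposing service_name and
# get_routes() in the expected shape should work.
#
#   async def run():
#       cleanup = await start_rpc_server(
#           RpcApiImpl(service), "localhost", uint16(55400), uint16(8555),
#           stop_cb=service.stop, root_path=root_path, net_config=config,
#           connect_to_daemon=False,
#       )
#       ...
#       await cleanup()  # closes the daemon link and tears down the aiohttp runner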
| import asyncio
import json
import logging
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import aiohttp
from dogechia.server.outbound_message import NodeType
from dogechia.server.server import ssl_context_for_server
from dogechia.types.peer_info import PeerInfo
from dogechia.util.byte_types import hexstr_to_bytes
from dogechia.util.ints import uint16
from dogechia.util.json_util import dict_to_json_str, obj_to_response
from dogechia.util.ws_message import create_payload, create_payload_dict, format_response, pong
log = logging.getLogger(__name__)
class RpcServer:
"""
Implementation of RPC server.
"""
def __init__(self, rpc_api: Any, service_name: str, stop_cb: Callable, root_path, net_config):
self.rpc_api = rpc_api
self.stop_cb: Callable = stop_cb
self.log = log
self.shut_down = False
self.websocket: Optional[aiohttp.ClientWebSocketResponse] = None
self.service_name = service_name
self.root_path = root_path
self.net_config = net_config
self.crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
self.key_path = root_path / net_config["daemon_ssl"]["private_key"]
self.ca_cert_path = root_path / net_config["private_ssl_ca"]["crt"]
self.ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
self.ssl_context = ssl_context_for_server(self.ca_cert_path, self.ca_key_path, self.crt_path, self.key_path)
async def stop(self):
self.shut_down = True
if self.websocket is not None:
await self.websocket.close()
async def _state_changed(self, *args):
if self.websocket is None:
return None
payloads: List[Dict] = await self.rpc_api._state_changed(*args)
change = args[0]
if change == "add_connection" or change == "close_connection" or change == "peer_changed_peak":
data = await self.get_connections({})
if data is not None:
payload = create_payload_dict(
"get_connections",
data,
self.service_name,
"wallet_ui",
)
payloads.append(payload)
for payload in payloads:
if "success" not in payload["data"]:
payload["data"]["success"] = True
try:
await self.websocket.send_str(dict_to_json_str(payload))
except Exception:
tb = traceback.format_exc()
self.log.warning(f"Sending data failed. Exception {tb}.")
def state_changed(self, *args):
if self.websocket is None:
return None
asyncio.create_task(self._state_changed(*args))
def _wrap_http_handler(self, f) -> Callable:
async def inner(request) -> aiohttp.web.Response:
request_data = await request.json()
try:
res_object = await f(request_data)
if res_object is None:
res_object = {}
if "success" not in res_object:
res_object["success"] = True
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Error while handling message: {tb}")
if len(e.args) > 0:
res_object = {"success": False, "error": f"{e.args[0]}"}
else:
res_object = {"success": False, "error": f"{e}"}
return obj_to_response(res_object)
return inner
async def get_connections(self, request: Dict) -> Dict:
if self.rpc_api.service.server is None:
raise ValueError("Global connections is not set")
if self.rpc_api.service.server._local_type is NodeType.FULL_NODE:
# TODO add peaks for peers
connections = self.rpc_api.service.server.get_connections()
con_info = []
if self.rpc_api.service.sync_store is not None:
peak_store = self.rpc_api.service.sync_store.peer_to_peak
else:
peak_store = None
for con in connections:
if peak_store is not None and con.peer_node_id in peak_store:
peak_hash, peak_height, peak_weight = peak_store[con.peer_node_id]
else:
peak_height = None
peak_hash = None
peak_weight = None
con_dict = {
"type": con.connection_type,
"local_port": con.local_port,
"peer_host": con.peer_host,
"peer_port": con.peer_port,
"peer_server_port": con.peer_server_port,
"node_id": con.peer_node_id,
"creation_time": con.creation_time,
"bytes_read": con.bytes_read,
"bytes_written": con.bytes_written,
"last_message_time": con.last_message_time,
"peak_height": peak_height,
"peak_weight": peak_weight,
"peak_hash": peak_hash,
}
con_info.append(con_dict)
else:
connections = self.rpc_api.service.server.get_connections()
con_info = [
{
"type": con.connection_type,
"local_port": con.local_port,
"peer_host": con.peer_host,
"peer_port": con.peer_port,
"peer_server_port": con.peer_server_port,
"node_id": con.peer_node_id,
"creation_time": con.creation_time,
"bytes_read": con.bytes_read,
"bytes_written": con.bytes_written,
"last_message_time": con.last_message_time,
}
for con in connections
]
return {"connections": con_info}
async def open_connection(self, request: Dict):
host = request["host"]
port = request["port"]
target_node: PeerInfo = PeerInfo(host, uint16(int(port)))
on_connect = None
if hasattr(self.rpc_api.service, "on_connect"):
on_connect = self.rpc_api.service.on_connect
if getattr(self.rpc_api.service, "server", None) is None or not (
await self.rpc_api.service.server.start_client(target_node, on_connect)
):
raise ValueError("Start client failed, or server is not set")
return {}
async def close_connection(self, request: Dict):
node_id = hexstr_to_bytes(request["node_id"])
if self.rpc_api.service.server is None:
raise aiohttp.web.HTTPInternalServerError()
connections_to_close = [c for c in self.rpc_api.service.server.get_connections() if c.peer_node_id == node_id]
if len(connections_to_close) == 0:
raise ValueError(f"Connection with node_id {node_id.hex()} does not exist")
for connection in connections_to_close:
await connection.close()
return {}
async def stop_node(self, request):
"""
Shuts down the node.
"""
if self.stop_cb is not None:
self.stop_cb()
return {}
async def ws_api(self, message):
"""
This function gets called when a new message is received via the websocket.
"""
command = message["command"]
if message["ack"]:
return None
data = None
if "data" in message:
data = message["data"]
if command == "ping":
return pong()
f = getattr(self, command, None)
if f is not None:
return await f(data)
f = getattr(self.rpc_api, command, None)
if f is not None:
return await f(data)
raise ValueError(f"unknown_command {command}")
async def safe_handle(self, websocket, payload):
message = None
try:
message = json.loads(payload)
self.log.debug(f"Rpc call <- {message['command']}")
response = await self.ws_api(message)
# Only respond if we return something from api call
if response is not None:
log.debug(f"Rpc response -> {message['command']}")
# Set success to true automatically (unless it's already set)
if "success" not in response:
response["success"] = True
await websocket.send_str(format_response(message, response))
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Error while handling message: {tb}")
if message is not None:
error = e.args[0] if e.args else e
res = {"success": False, "error": f"{error}"}
await websocket.send_str(format_response(message, res))
async def connection(self, ws):
data = {"service": self.service_name}
payload = create_payload("register_service", data, self.service_name, "daemon")
await ws.send_str(payload)
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
message = msg.data.strip()
# self.log.info(f"received message: {message}")
await self.safe_handle(ws, message)
elif msg.type == aiohttp.WSMsgType.BINARY:
self.log.debug("Received binary data")
elif msg.type == aiohttp.WSMsgType.PING:
self.log.debug("Ping received")
await ws.pong()
elif msg.type == aiohttp.WSMsgType.PONG:
self.log.debug("Pong received")
else:
if msg.type == aiohttp.WSMsgType.CLOSE:
self.log.debug("Closing RPC websocket")
await ws.close()
elif msg.type == aiohttp.WSMsgType.ERROR:
self.log.error("Error during receive %s" % ws.exception())
elif msg.type == aiohttp.WSMsgType.CLOSED:
pass
break
await ws.close()
async def connect_to_daemon(self, self_hostname: str, daemon_port: uint16):
while True:
try:
if self.shut_down:
break
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f"wss://{self_hostname}:{daemon_port}",
autoclose=True,
autoping=True,
heartbeat=60,
ssl_context=self.ssl_context,
max_msg_size=100 * 1024 * 1024,
) as ws:
self.websocket = ws
await self.connection(ws)
self.websocket = None
except aiohttp.ClientConnectorError:
self.log.warning(f"Cannot connect to daemon at ws://{self_hostname}:{daemon_port}")
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Exception: {tb} {type(e)}")
await asyncio.sleep(2)
async def start_rpc_server(
rpc_api: Any,
self_hostname: str,
daemon_port: uint16,
rpc_port: uint16,
stop_cb: Callable,
root_path: Path,
net_config,
connect_to_daemon=True,
):
"""
Starts an HTTP server with the following RPC methods, to be used by local clients to
query the node.
"""
app = aiohttp.web.Application()
rpc_server = RpcServer(rpc_api, rpc_api.service_name, stop_cb, root_path, net_config)
rpc_server.rpc_api.service._set_state_changed_callback(rpc_server.state_changed)
http_routes: Dict[str, Callable] = rpc_api.get_routes()
routes = [aiohttp.web.post(route, rpc_server._wrap_http_handler(func)) for (route, func) in http_routes.items()]
routes += [
aiohttp.web.post(
"/get_connections",
rpc_server._wrap_http_handler(rpc_server.get_connections),
),
aiohttp.web.post(
"/open_connection",
rpc_server._wrap_http_handler(rpc_server.open_connection),
),
aiohttp.web.post(
"/close_connection",
rpc_server._wrap_http_handler(rpc_server.close_connection),
),
aiohttp.web.post("/stop_node", rpc_server._wrap_http_handler(rpc_server.stop_node)),
]
app.add_routes(routes)
if connect_to_daemon:
daemon_connection = asyncio.create_task(rpc_server.connect_to_daemon(self_hostname, daemon_port))
runner = aiohttp.web.AppRunner(app, access_log=None)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self_hostname, int(rpc_port), ssl_context=rpc_server.ssl_context)
await site.start()
async def cleanup():
await rpc_server.stop()
await runner.cleanup()
if connect_to_daemon:
await daemon_connection
return cleanup
|
#!/usr/bin/env python3
from logging import debug, info, warning, error
from time import sleep
import traceback
class DeviceExample():
def __init__(self):
self.io = None
self.trajectory = None
self.idle_value = 0.0
def init(self):
"""
orderly initialization
"""
debug('Conducting orderly initialization of the Example Device')
sleep(1)
def parse_CMD(self,cmd):
def linear(start=0,end=10,step=1):
from numpy import arange
return list(arange(start,end,step))
try:
lst = eval(cmd)
except Exception:
lst = None
if type(lst) is list:
return {'flag':True, 'values':lst}
else:
return {'flag':False, 'values':[]}
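# Illustrative call (input assumed): parse_CMD('linear(0, 10, 2)') evaluates
# the expression with the local `linear` helper in scope and returns
# {'flag': True, 'values': [0, 2, 4, 6, 8]}; anything that does not eval to a
# list yields {'flag': False, 'values': []}. Note that eval() trusts the sender.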
def set_VAL(self,value):
self._VAL = value
sleep(0.5)
def get_VAL(self):
return self._VAL
VAL = property(get_VAL,set_VAL)
def io_execute(self,pv_name, value):
"""
"""
from time import sleep
print(f'io_execute received: {pv_name},{value}')
response = ''
if pv_name == 'CMD':
self.io_put(pv_name = 'ACK',value = 0)
self.io_put(pv_name = 'values',value = [])
reply = self.parse_CMD(value)
response = ""+str(int(reply['flag']))
self.trajectory = reply['values']
if reply['flag'] == False:
response += f"{"failed to eval the command"}"
self.io_put(pv_name = 'values',value = reply['values'])
sleep(1)
self.io_put(pv_name = 'ACK',value = response)
if pv_name == 'Nested_Indices':
print(f'io_execute inside if pv_name == Nested_Indices: {'Nested_Indices'}')
print(bytes(value))
index = eval(bytes(value))['test.server']
self.io_put(pv_name = 'ACK',value = 0)
try:
if self.trajectory is not None:
self.VAL = self.trajectory[index]
else:
self.VAL = self.idle_value
flag = True
resp = ' '
except Exception:
resp = traceback.format_exc()
print(resp)
flag = False
print(self.VAL)
response += resp
self.io_put(pv_name = 'VAL',value = self.VAL)
self.io_put(pv_name = 'message',value = response)
self.io_put(pv_name = 'ACK',value = 1)
print('response:', response)
def io_put(self,pv_name, value):
print(f'DeviceExample.io_put: {pv_name},{value}')
if self.io is not None:
if pv_name == 'VAL':
self.io.io_put_queue.put({pv_name: value})
else:
self.io.seq.io_put_queue.put({pv_name: value})
if __name__ == '__main__':
device = DeviceExample()
device.init()
| #!/usr/bin/env python3
from logging import debug, info, warning, error
from time import sleep
import traceback
class DeviceExample():
def __init__(self):
self.io = None
self.trajectory = None
self.idle_value = 0.0
def init(self):
"""
orderly initialization
"""
debug('Conducting orderly initialization of the Example Device')
sleep(1)
def parse_CMD(self,cmd):
def linear(start=0,end=10,step=1):
from numpy import arange
return list(arange(start,end,step))
try:
lst = eval(cmd)
except Exception:
lst = None
if type(lst) is list:
return {'flag':True, 'values':lst}
else:
return {'flag':False, 'values':[]}
def set_VAL(self,value):
self._VAL = value
sleep(0.5)
def get_VAL(self):
return self._VAL
VAL = property(get_VAL,set_VAL)
def io_execute(self,pv_name, value):
"""
"""
from time import sleep
print(f'io_execute received: {pv_name},{value}')
response = ''
if pv_name == 'CMD':
self.io_put(pv_name = 'ACK',value = 0)
self.io_put(pv_name = 'values',value = [])
reply = self.parse_CMD(value)
response = ""+str(int(reply['flag']))
self.trajectory = reply['values']
if reply['flag'] == False:
response += f"{'failed to eval the command'}"
self.io_put(pv_name = 'values',value = reply['values'])
sleep(1)
self.io_put(pv_name = 'ACK',value = response)
if pv_name == 'Nested_Indices':
print(f'io_execute inside if pv_name == Nested_Indices: {"Nested_Indices"}')
print(bytes(value))
index = eval(bytes(value))['test.server']
self.io_put(pv_name = 'ACK',value = 0)
try:
if self.trajectory is not None:
self.VAL = self.trajectory[index]
else:
self.VAL = self.idle_value
flag = True
resp = ' '
except Exception:
resp = traceback.format_exc()
print(resp)
flag = False
print(self.VAL)
response += resp
self.io_put(pv_name = 'VAL',value = self.VAL)
self.io_put(pv_name = 'message',value = response)
self.io_put(pv_name = 'ACK',value = 1)
print('response:', response)
def io_put(self,pv_name, value):
print(f'DeviceExample.io_put: {pv_name},{value}')
if self.io is not None:
if pv_name == 'VAL':
self.io.io_put_queue.put({pv_name: value})
else:
self.io.seq.io_put_queue.put({pv_name: value})
if __name__ == '__main__':
device = DeviceExample()
device.init()
|
import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.network_type import NetworkType
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
max_pos_per_sp = 5
if self.farmer.constants.NETWORK_TYPE != NetworkType.MAINNET:
# This is meant to make testnets more stable, when difficulty is very low
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed with getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict["current_difficulty"]}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
if authentication_pk is None:
self.farmer.log.error(f"No authentication sk for {authentication_pk}")
return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
json=post_partial_request.to_json_dict(),  # the signed partial, serialized for the pool endpoint
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response["error_code"], pool_response["error_message"]}"
)
pool_state_dict["pool_errors_24h"].append(pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
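# Rule of thumb encoded above (constants assumed from chia's consensus code):
# a proof is block-eligible when required_iters is below one signage-point
# interval, i.e. sub_slot_iters // NUM_SPS_SUB_SLOT; the same comparison
# against POOL_SUB_SLOT_ITERS decides whether a partial is worth submitting to
# the pool at its current difficulty.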
@api_request
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
"""
There are two cases: receiving signatures for sps, or receiving signatures for the block.
"""
if response.sp_hash not in self.farmer.sps:
self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
return None
is_sp_signatures: bool = False
sps = self.farmer.sps[response.sp_hash]
signage_point_index = sps[0].signage_point_index
found_sp_hash_debug = False
for sp_candidate in sps:
if response.sp_hash == response.message_signatures[0][0]:
found_sp_hash_debug = True
if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
is_sp_signatures = True
if found_sp_hash_debug:
assert is_sp_signatures
pospace = None
for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
if plot_identifier == response.plot_identifier:
pospace = candidate_pospace
assert pospace is not None
include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
computed_quality_string = pospace.verify_and_get_quality_string(
self.farmer.constants, response.challenge_hash, response.sp_hash
)
if computed_quality_string is None:
self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
return None
if is_sp_signatures:
(
challenge_chain_sp,
challenge_chain_sp_harv_sig,
) = response.message_signatures[0]
reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, challenge_chain_sp, agg_pk)
taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, reward_chain_sp, agg_pk)
else:
taproot_share_cc_sp = G2Element()
taproot_share_rc_sp = G2Element()
farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
agg_sig_cc_sp = AugSchemeMPL.aggregate(
[challenge_chain_sp_harv_sig, farmer_share_cc_sp, taproot_share_cc_sp]
)
assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)
# This means it passes the sp filter
farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
agg_sig_rc_sp = AugSchemeMPL.aggregate(
[reward_chain_sp_harv_sig, farmer_share_rc_sp, taproot_share_rc_sp]
)
assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)
if pospace.pool_public_key is not None:
assert pospace.pool_contract_puzzle_hash is None
pool_pk = bytes(pospace.pool_public_key)
if pool_pk not in self.farmer.pool_sks_map:
self.farmer.log.error(
f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
)
return None
pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
assert pool_target is not None
pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
)
else:
assert pospace.pool_contract_puzzle_hash is not None
pool_target = None
pool_target_signature = None
request = farmer_protocol.DeclareProofOfSpace(
response.challenge_hash,
challenge_chain_sp,
signage_point_index,
reward_chain_sp,
pospace,
agg_sig_cc_sp,
agg_sig_rc_sp,
self.farmer.farmer_target,
pool_target,
pool_target_signature,
)
self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
return None
else:
# This is a response with block signatures
for sk in self.farmer.get_private_keys():
(
foliage_block_data_hash,
foliage_sig_harvester,
) = response.message_signatures[0]
(
foliage_transaction_block_hash,
foliage_transaction_block_sig_harvester,
) = response.message_signatures[1]
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
foliage_sig_taproot: G2Element = AugSchemeMPL.sign(taproot_sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_transaction_block_hash, agg_pk
)
else:
foliage_sig_taproot = G2Element()
foliage_transaction_block_sig_taproot = G2Element()
foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
foliage_agg_sig = AugSchemeMPL.aggregate(
[foliage_sig_harvester, foliage_sig_farmer, foliage_sig_taproot]
)
foliage_block_agg_sig = AugSchemeMPL.aggregate(
[
foliage_transaction_block_sig_harvester,
foliage_transaction_block_sig_farmer,
foliage_transaction_block_sig_taproot,
]
)
assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
request_to_nodes = farmer_protocol.SignedValues(
computed_quality_string,
foliage_agg_sig,
foliage_block_agg_sig,
)
msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
pool_difficulties: List[PoolDifficulty] = []
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
if pool_dict["pool_config"].pool_url == "":
# Self pooling
continue
if pool_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this signage point, pool: "
f"{pool_dict["pool_config"].pool_url} "
)
continue
pool_difficulties.append(
PoolDifficulty(
pool_dict["current_difficulty"],
self.farmer.constants.POOL_SUB_SLOT_ITERS,
p2_singleton_puzzle_hash,
)
)
message = harvester_protocol.NewSignagePointHarvester(
new_signage_point.challenge_hash,
new_signage_point.difficulty,
new_signage_point.sub_slot_iters,
new_signage_point.signage_point_index,
new_signage_point.challenge_chain_sp,
pool_difficulties,
)
msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
if new_signage_point.challenge_chain_sp not in self.farmer.sps:
self.farmer.sps[new_signage_point.challenge_chain_sp] = []
if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
return
self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
}
},
)
@api_request
async def respond_plots(self, _: harvester_protocol.RespondPlots):
self.farmer.log.warning("Respond plots came too late")
| import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.network_type import NetworkType
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
max_pos_per_sp = 5
if self.farmer.constants.NETWORK_TYPE != NetworkType.MAINNET:
# This is meant to make testnets more stable, when difficulty is very low
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)
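                    # The aggregate above combines the farmer's half, the harvester's half,
                    # and the taproot signature under the shared plot public key:
                    #   plot_signature = aggregate([sig_farmer, harvester_sig, taproot_sig])
                    # which is exactly what the verify assert checks.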
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
            if authentication_pk is None:
self.farmer.log.error(f"No authentication sk for {authentication_pk}")
return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
                        json=post_partial_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
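        # The handling above implies the pool's JSON reply carries either an error
        # ("error_code", "error_message") or a "new_difficulty" on success, e.g.
        # (hypothetical values):
        #   {"error_code": 5, "error_message": "proof not good enough"}
        #   {"new_difficulty": 2}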
@api_request
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
"""
There are two cases: receiving signatures for sps, or receiving signatures for the block.
"""
if response.sp_hash not in self.farmer.sps:
self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
return None
is_sp_signatures: bool = False
sps = self.farmer.sps[response.sp_hash]
signage_point_index = sps[0].signage_point_index
found_sp_hash_debug = False
for sp_candidate in sps:
if response.sp_hash == response.message_signatures[0][0]:
found_sp_hash_debug = True
if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
is_sp_signatures = True
if found_sp_hash_debug:
assert is_sp_signatures
pospace = None
for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
if plot_identifier == response.plot_identifier:
pospace = candidate_pospace
assert pospace is not None
include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
computed_quality_string = pospace.verify_and_get_quality_string(
self.farmer.constants, response.challenge_hash, response.sp_hash
)
if computed_quality_string is None:
self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
return None
if is_sp_signatures:
(
challenge_chain_sp,
challenge_chain_sp_harv_sig,
) = response.message_signatures[0]
reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, challenge_chain_sp, agg_pk)
taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, reward_chain_sp, agg_pk)
else:
taproot_share_cc_sp = G2Element()
taproot_share_rc_sp = G2Element()
farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
agg_sig_cc_sp = AugSchemeMPL.aggregate(
[challenge_chain_sp_harv_sig, farmer_share_cc_sp, taproot_share_cc_sp]
)
assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)
# This means it passes the sp filter
farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
agg_sig_rc_sp = AugSchemeMPL.aggregate(
[reward_chain_sp_harv_sig, farmer_share_rc_sp, taproot_share_rc_sp]
)
assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)
if pospace.pool_public_key is not None:
assert pospace.pool_contract_puzzle_hash is None
pool_pk = bytes(pospace.pool_public_key)
if pool_pk not in self.farmer.pool_sks_map:
self.farmer.log.error(
f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
)
return None
pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
assert pool_target is not None
pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
)
else:
assert pospace.pool_contract_puzzle_hash is not None
pool_target = None
pool_target_signature = None
request = farmer_protocol.DeclareProofOfSpace(
response.challenge_hash,
challenge_chain_sp,
signage_point_index,
reward_chain_sp,
pospace,
agg_sig_cc_sp,
agg_sig_rc_sp,
self.farmer.farmer_target,
pool_target,
pool_target_signature,
)
self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
return None
else:
# This is a response with block signatures
for sk in self.farmer.get_private_keys():
(
foliage_block_data_hash,
foliage_sig_harvester,
) = response.message_signatures[0]
(
foliage_transaction_block_hash,
foliage_transaction_block_sig_harvester,
) = response.message_signatures[1]
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
foliage_sig_taproot: G2Element = AugSchemeMPL.sign(taproot_sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_transaction_block_hash, agg_pk
)
else:
foliage_sig_taproot = G2Element()
foliage_transaction_block_sig_taproot = G2Element()
foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
foliage_agg_sig = AugSchemeMPL.aggregate(
[foliage_sig_harvester, foliage_sig_farmer, foliage_sig_taproot]
)
foliage_block_agg_sig = AugSchemeMPL.aggregate(
[
foliage_transaction_block_sig_harvester,
foliage_transaction_block_sig_farmer,
foliage_transaction_block_sig_taproot,
]
)
assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
request_to_nodes = farmer_protocol.SignedValues(
computed_quality_string,
foliage_agg_sig,
foliage_block_agg_sig,
)
msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
pool_difficulties: List[PoolDifficulty] = []
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
if pool_dict["pool_config"].pool_url == "":
# Self pooling
continue
if pool_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this signage point, pool: "
f"{pool_dict['pool_config'].pool_url} "
)
continue
pool_difficulties.append(
PoolDifficulty(
pool_dict["current_difficulty"],
self.farmer.constants.POOL_SUB_SLOT_ITERS,
p2_singleton_puzzle_hash,
)
)
message = harvester_protocol.NewSignagePointHarvester(
new_signage_point.challenge_hash,
new_signage_point.difficulty,
new_signage_point.sub_slot_iters,
new_signage_point.signage_point_index,
new_signage_point.challenge_chain_sp,
pool_difficulties,
)
msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
if new_signage_point.challenge_chain_sp not in self.farmer.sps:
self.farmer.sps[new_signage_point.challenge_chain_sp] = []
if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
return
self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
}
},
)
@api_request
async def respond_plots(self, _: harvester_protocol.RespondPlots):
self.farmer.log.warning("Respond plots came too late")
|
import numpy as np
import trimesh
import pyrender
import matplotlib.pyplot as plt
import math
from tqdm import tqdm
import os
import torch
import torchvision
import glob
def render_one(mesh_list, steps, save_name, save_path, resolution, need_video=False):
"""
mesh: pyrender.mesh.Mesh
A pyrender.mesh.Mesh object
steps: int
number of steps in one horizontal revolution
save_path: str
path to save color and depth image (saved as numpy arrays).
mode: str, either 'light' or 'albedo'
if 'light', then render with light objects
if 'albedo', then render with only ambient lights
resolution: tuple of 2: (res_h, res_w)
----
file saving:
This files save the color image, the depth image, the camera pose and the camera projection matrix
color image: saved as
[save_path]/[save_name]/[save_name]_[rotate_deg]_color.npy
depth image: saved as
[save_path]/[save_name]/[save_name]_[rotate_deg]_depth.npy
camera pose: saved as
[save_path]/[save_name]/[save_name]_[rotate_deg]_campose.npy
projection matrix: saved as
[save_path]/[save_name]/[save_name]_projection.npy
"""
print(f'Starting to render one, which will be saved to {os.path.join(save_path, save_name)}.')
    os.makedirs(os.path.join(save_path, save_name), exist_ok=True)
# resolution
res_h, res_w = resolution
# creating nodes
# mesh
node_mesh_list = []
for mesh in mesh_list:
#mesh = pyrender.Mesh.from_trimesh(mesh)
node_mesh_list.append( pyrender.Node(mesh=mesh, matrix=np.eye(4)) )
# directional light
dir_light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2.0)
node_light = pyrender.Node(light=dir_light, matrix=np.eye(4))
# perspective cameras
pers_cam = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1)
node_cam = pyrender.Node(camera=pers_cam, matrix=np.eye(4))
# scene
scene = pyrender.Scene(ambient_light=[1., 1., 1.], bg_color=[1., 1., 1.])
for node_mesh in node_mesh_list:
scene.add_node(node_mesh)
scene.add_node(node_light)
scene.add_node(node_cam)
    offscr_renderer = pyrender.OffscreenRenderer(viewport_width=res_w, viewport_height=res_h, point_size=3.)
# for outputting video
if need_video:
color_video = torch.zeros(steps, res_h, res_w, 3, dtype=torch.uint8)
depth_video = torch.zeros(steps, res_h, res_w, 3, dtype=torch.uint8)
albedo_video = torch.zeros(steps, res_h, res_w, 3, dtype=torch.uint8)
deg_interval = 720 / steps
for i, angle_i in enumerate(range(steps)):
print(f'Showing angle {angle_i}')
angle_deg = int(deg_interval * angle_i)
angle_rad = angle_deg * math.pi / 180
s = math.sin(angle_rad)
c = math.cos(angle_rad)
camera_pose = np.array([
[ c, 0.0, s, 2*s],
[0.0, -1.0, 0.0, 0.0],
[ s, 0.0, -c, -2*c],
[0.0, 0.0, 0.0, 1.0]])
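        # This pose keeps the camera on a circle of radius 2 in the xz-plane
        # (translation (2s, 0, -2c)) with its -z axis aimed at the world origin,
        # so the camera orbits the scene while always facing the object; the
        # second row flips y, i.e. the camera's 'up' is world -y.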
pitch_angle_rad = 30 * math.pi / 180
s_pitch = math.sin(pitch_angle_rad)
c_pitch = math.cos(pitch_angle_rad)
# rendering
scene.set_pose(node_cam, pose=camera_pose)
color, depth = offscr_renderer.render(scene)
scene.remove_node(node_light)
albedo, _ = offscr_renderer.render(scene)
scene.add_node(node_light)
#plt.imshow(color)
#plt.show()
# making video
if need_video:
color_video[i] = torch.from_numpy(color.copy())
depth_pt = torch.from_numpy(depth.copy())
depth_scaled = (depth_pt - depth_pt[depth_pt !=0].min()) / (depth_pt[depth_pt != 0].max() - depth_pt[depth_pt != 0].min()) * 255
depth_scaled = torch.where(depth_pt != 0., depth_scaled, torch.zeros_like(depth_scaled))
depth_video[i] = depth_scaled.int().unsqueeze(dim=-1).expand(-1, -1, 3)
albedo_video[i] = torch.from_numpy(albedo.copy())
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_color'), color)
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_depth'), depth)
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_albedo'), albedo)
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_campose'), camera_pose)
#plt.imshow(color)
#plt.savefig(f'{save_name}_color_{angle_i}.png', bbox_inches='tight')
#plt.clf()
#plt.show()
#plt.imshow(depth)
#plt.show()
#plt.imshow(albedo)
#plt.show()
#np.save( os.path.join(save_path, save_name, f'{save_name}_projection'), node_cam.camera.get_projection_matrix())
#print(node_cam.camera.get_projection_matrix())
if need_video:
final_video = torch.cat([color_video, depth_video], dim=2)
torchvision.io.write_video( os.path.join(save_path, save_name, f'{save_name}_rendervideo_color.mp4'), color_video, fps=30)
torchvision.io.write_video( os.path.join(save_path, save_name, f'{save_name}_rendervideo_depth.mp4'), depth_video, fps=30)
if __name__ == '__main__':
# headless rendering
os.environ['PYOPENGL_PLATFORM']='egl'
source_folder = '02691156'
# steps
steps = 180
file_list = ['0676', '0775', '1314', '0411', '0447', '1441', '0993', '0671']
for frame_id in file_list:
file_name = glob.glob(f'*{frame_id}*.ply')[0]
        os.system(f'python ~/.dev_apps/simplemesh/simplemesh.py --input {file_name} -n.85 --output {file_name[:-4]+"_norm.ply"}')
pcd = trimesh.load(file_name[:-4]+"_norm.ply")
pcd_pyr = pyrender.Mesh.from_points(pcd.vertices, colors=pcd.colors)
render_one([pcd_pyr], steps=steps, save_name=file_name[:-4]+'_render', save_path='videos', resolution=(512, 512), need_video=True)
| import numpy as np
import trimesh
import pyrender
import matplotlib.pyplot as plt
import math
from tqdm import tqdm
import os
import torch
import torchvision
import glob
def render_one(mesh_list, steps, save_name, save_path, resolution, need_video=False):
"""
mesh: pyrender.mesh.Mesh
A pyrender.mesh.Mesh object
steps: int
number of steps in one horizontal revolution
save_path: str
path to save color and depth image (saved as numpy arrays).
mode: str, either 'light' or 'albedo'
if 'light', then render with light objects
if 'albedo', then render with only ambient lights
resolution: tuple of 2: (res_h, res_w)
----
file saving:
This files save the color image, the depth image, the camera pose and the camera projection matrix
color image: saved as
[save_path]/[save_name]/[save_name]_[rotate_deg]_color.npy
depth image: saved as
[save_path]/[save_name]/[save_name]_[rotate_deg]_depth.npy
camera pose: saved as
[save_path]/[save_name]/[save_name]_[rotate_deg]_campose.npy
projection matrix: saved as
[save_path]/[save_name]/[save_name]_projection.npy
"""
print(f'Starting to render one, which will be saved to {os.path.join(save_path, save_name)}.')
    os.makedirs(os.path.join(save_path, save_name), exist_ok=True)
# resolution
res_h, res_w = resolution
# creating nodes
# mesh
node_mesh_list = []
for mesh in mesh_list:
#mesh = pyrender.Mesh.from_trimesh(mesh)
node_mesh_list.append( pyrender.Node(mesh=mesh, matrix=np.eye(4)) )
# directional light
dir_light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2.0)
node_light = pyrender.Node(light=dir_light, matrix=np.eye(4))
# perspective cameras
pers_cam = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1)
node_cam = pyrender.Node(camera=pers_cam, matrix=np.eye(4))
# scene
scene = pyrender.Scene(ambient_light=[1., 1., 1.], bg_color=[1., 1., 1.])
for node_mesh in node_mesh_list:
scene.add_node(node_mesh)
scene.add_node(node_light)
scene.add_node(node_cam)
    offscr_renderer = pyrender.OffscreenRenderer(viewport_width=res_w, viewport_height=res_h, point_size=3.)
# for outputting video
if need_video:
color_video = torch.zeros(steps, res_h, res_w, 3, dtype=torch.uint8)
depth_video = torch.zeros(steps, res_h, res_w, 3, dtype=torch.uint8)
albedo_video = torch.zeros(steps, res_h, res_w, 3, dtype=torch.uint8)
deg_interval = 720 / steps
for i, angle_i in enumerate(range(steps)):
print(f'Showing angle {angle_i}')
angle_deg = int(deg_interval * angle_i)
angle_rad = angle_deg * math.pi / 180
s = math.sin(angle_rad)
c = math.cos(angle_rad)
camera_pose = np.array([
[ c, 0.0, s, 2*s],
[0.0, -1.0, 0.0, 0.0],
[ s, 0.0, -c, -2*c],
[0.0, 0.0, 0.0, 1.0]])
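        # This pose keeps the camera on a circle of radius 2 in the xz-plane
        # (translation (2s, 0, -2c)) with its -z axis aimed at the world origin,
        # so the camera orbits the scene while always facing the object; the
        # second row flips y, i.e. the camera's 'up' is world -y.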
pitch_angle_rad = 30 * math.pi / 180
s_pitch = math.sin(pitch_angle_rad)
c_pitch = math.cos(pitch_angle_rad)
# rendering
scene.set_pose(node_cam, pose=camera_pose)
color, depth = offscr_renderer.render(scene)
scene.remove_node(node_light)
albedo, _ = offscr_renderer.render(scene)
scene.add_node(node_light)
#plt.imshow(color)
#plt.show()
# making video
if need_video:
color_video[i] = torch.from_numpy(color.copy())
depth_pt = torch.from_numpy(depth.copy())
depth_scaled = (depth_pt - depth_pt[depth_pt !=0].min()) / (depth_pt[depth_pt != 0].max() - depth_pt[depth_pt != 0].min()) * 255
depth_scaled = torch.where(depth_pt != 0., depth_scaled, torch.zeros_like(depth_scaled))
depth_video[i] = depth_scaled.int().unsqueeze(dim=-1).expand(-1, -1, 3)
albedo_video[i] = torch.from_numpy(albedo.copy())
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_color'), color)
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_depth'), depth)
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_albedo'), albedo)
#np.save( os.path.join(save_path, save_name, f'{save_name}_{angle_deg}_campose'), camera_pose)
#plt.imshow(color)
#plt.savefig(f'{save_name}_color_{angle_i}.png', bbox_inches='tight')
#plt.clf()
#plt.show()
#plt.imshow(depth)
#plt.show()
#plt.imshow(albedo)
#plt.show()
#np.save( os.path.join(save_path, save_name, f'{save_name}_projection'), node_cam.camera.get_projection_matrix())
#print(node_cam.camera.get_projection_matrix())
if need_video:
final_video = torch.cat([color_video, depth_video], dim=2)
torchvision.io.write_video( os.path.join(save_path, save_name, f'{save_name}_rendervideo_color.mp4'), color_video, fps=30)
torchvision.io.write_video( os.path.join(save_path, save_name, f'{save_name}_rendervideo_depth.mp4'), depth_video, fps=30)
if __name__ == '__main__':
# headless rendering
os.environ['PYOPENGL_PLATFORM']='egl'
source_folder = '02691156'
# steps
steps = 180
file_list = ['0676', '0775', '1314', '0411', '0447', '1441', '0993', '0671']
for frame_id in file_list:
file_name = glob.glob(f'*{frame_id}*.ply')[0]
os.system(f'python ~/.dev_apps/simplemesh/simplemesh.py --input {file_name} -n.85 --output {file_name[:-4]+"_norm.ply"}')
pcd = trimesh.load(file_name[:-4]+"_norm.ply")
pcd_pyr = pyrender.Mesh.from_points(pcd.vertices, colors=pcd.colors)
render_one([pcd_pyr], steps=steps, save_name=file_name[:-4]+'_render', save_path='videos', resolution=(512, 512), need_video=True)
|
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. Jason Cameron +
# All rights reserved. +
# This file is part of the edoC discord bot project , +
# and is released under the "MIT License Agreement". Please see the LICENSE +
# file that should have been included as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import argparse
import asyncio
import copy
import datetime
import logging
import re
import shlex
from collections import Counter
from io import BytesIO
import discord
from discord import NotFound, Object
from discord.ext import commands
from discord.ext.commands import Converter, BadArgument
from discord.utils import find
from cogs.Discordinfo import format_relative, plural
# from lib.db import db
from utils import checks, default
from utils.checks import MemberConverterr
from utils.default import mod_or_permissions
from utils.vars import *
log = logging.getLogger('mod')
class Arguments(argparse.ArgumentParser):
def error(self, message):
raise RuntimeError(message)
class BannedUser(Converter):
    async def convert(self, ctx, arg):
        if ctx.guild.me.guild_permissions.ban_members:
            if arg.isdigit():
                try:
                    return (await ctx.guild.fetch_ban(Object(id=int(arg)))).user
                except NotFound:
                    raise BadArgument
            banned = [e.user for e in await ctx.guild.bans()]
            if (user := find(lambda u: str(u) == arg, banned)) is not None:
                return user
        # Fall through: no ban permission, no match, or nobody banned.
        raise BadArgument
class MemberID(commands.Converter):
async def convert(self, ctx, argument):
try:
m = await commands.MemberConverter().convert(ctx, argument)
except commands.BadArgument:
try:
return int(argument, base=10)
except ValueError:
raise commands.BadArgument(f"{argument} is not a valid member or member ID.") from None
else:
return m.id
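# MemberID yields an int either way: a resolvable member ("@someone") is
# converted to its ID, while a raw snowflake string such as "845186772698923029"
# (hypothetical ID) is parsed directly.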
class ActionReason(commands.Converter):
async def convert(self, ctx, argument):
ret = argument
if len(ret) > 512:
            raise commands.BadArgument(f"Reason is too long ({len(ret)}/512).")
return ret
BannedUsers = set()
async def Get_Banned_Users(bot):
    bans = bot.db.field("SELECT id FROM users WHERE banned = ?", "True")
    for UserID in bans:
        BannedUsers.add(UserID)
async def BannedU(ctx):
    if ctx.author in BannedUsers:
        print(f"Command by {ctx.author} blocked!")
    async def pred(ctx):
        if ctx.author in BannedUsers:
            return ctx.send("You are banned from using commands")
    return pred
async def BanUser(ctx, userid: MemberID, reason):
    BannedUsers.add(userid)
    # Column names here are an assumption; the table is only known to have `id` and `banned`.
    ctx.bot.db.execute("INSERT INTO users (id, reason) VALUES (?, ?)", (userid, reason))
    # db.execute("INSERT INTO users (Reason)", reason)
    ctx.bot.db.commit()
    return await ctx.send(f'{userid} was banned from using the bot')
def can_execute_action(ctx, user, target):
return user.id == ctx.bot.owner_id or \
user == ctx.guild.owner or \
user.top_role > target.top_role
class NoMuteRole(commands.CommandError):
def __init__(self):
super().__init__('This server does not have a mute role set up.')
def can_mute():
    async def predicate(ctx):
        is_owner = await ctx.bot.is_owner(ctx.author)
        if ctx.guild is None:
            return False
        if not ctx.author.guild_permissions.manage_roles and not is_owner:
            return False
        # This will only be used within this cog.
        role = discord.utils.get(ctx.guild.roles, name='Muted')
        if role is None:
            # Create the role first so the permission overwrites below have a target.
            perms = ctx.guild.default_role.permissions
            role = await ctx.guild.create_role(name="Muted", permissions=perms)
        for channel in ctx.guild.text_channels:
            await channel.set_permissions(role, overwrite=discord.PermissionOverwrite(send_messages=False,
                                                                                      add_reactions=False))
        for channel in ctx.guild.voice_channels:
            await channel.set_permissions(role, overwrite=discord.PermissionOverwrite(speak=False))
        return ctx.author.top_role > role
    return commands.check(predicate)
class Mod(commands.Cog, description='Moderator go brrrrrrrr ~ban'):
def __init__(self, bot):
self.bot = bot
self.config = bot.config
async def _basic_cleanup_strategy(self, ctx, search):
count = 0
async for msg in ctx.history(limit=search, before=ctx.message):
if msg.author == ctx.me and not (msg.mentions or msg.role_mentions):
await msg.delete()
count += 1
return {'Bot': count}
async def _complex_cleanup_strategy(self, ctx, search):
        prefixes = tuple(self.bot.get_guild_prefixes(ctx.guild))  # thanks startswith. TODO: update this, it won't work right now
def check(m):
return m.author == ctx.me or m.content.startswith(prefixes)
deleted = await ctx.channel.purge(limit=search, check=check, before=ctx.message)
return Counter(m.author.display_name for m in deleted)
async def _regular_user_cleanup_strategy(self, ctx, search):
prefixes = tuple(self.bot.get_guild_prefixes(ctx.guild))
def check(m):
return (m.author == ctx.me or m.content.startswith(prefixes)) and not (m.mentions or m.role_mentions)
deleted = await ctx.channel.purge(limit=search, check=check, before=ctx.message)
return Counter(m.author.display_name for m in deleted)
@commands.command()
async def cleanup(self, ctx, search=100):
"""Cleans up the bot's messages from the channel.
If a search number is specified, it searches that many messages to delete.
If the bot has Manage Messages permissions then it will try to delete
messages that look like they invoked the bot as well.
After the cleanup is completed, the bot will send you a message with
which people got their messages deleted and their count. This is useful
to see which users are spammers.
Members with Manage Messages can search up to 1000 messages.
Members without can search up to 25 messages.
"""
strategy = self._basic_cleanup_strategy
is_mod = ctx.channel.permissions_for(ctx.author).manage_messages
if ctx.channel.permissions_for(ctx.me).manage_messages:
if is_mod:
strategy = self._complex_cleanup_strategy
else:
strategy = self._regular_user_cleanup_strategy
if is_mod:
search = min(max(2, search), 1000)
else:
search = min(max(2, search), 25)
spammers = await strategy(ctx, search)
deleted = sum(spammers.values())
        messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
if deleted:
messages.append('')
spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
messages.extend(f'- **{author}**: {count}' for author, count in spammers)
await ctx.send('\n'.join(messages), delete_after=10)
@commands.command(aliases=['newmembers', 'nu'])
@commands.guild_only()
async def newusers(self, ctx, *, count=5):
"""Tells you the newest members of the server.
This is useful to check if any suspicious members have
joined.
The count parameter can only be up to 25.
"""
count = max(min(count, 25), 5)
if not ctx.guild.chunked:
members = await ctx.guild.chunk(cache=True)
members = sorted(ctx.guild.members, key=lambda m: m.joined_at, reverse=True)[:count]
e = discord.Embed(title='New Members', colour=green)
for member in members:
body = f'Joined {format_relative(member.joined_at)}\nCreated {format_relative(member.created_at)}'
e.add_field(name=f'{member} (ID: {member.id})', value=body, inline=False)
await ctx.send(embed=e)
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_emojis=True)
async def emoji(self, ctx, emoji: discord.PartialEmoji, *roles: discord.Role):
"""This clones a specified emoji that only specified roles
are allowed to use.
"""
# fetch the emoji asset and read it as bytes.
emoji_bytes = await emoji.read()
# the key parameter here is `roles`, which controls
# what roles are able to use the emoji.
await ctx.guild.create_custom_emoji(
name=emoji.name,
image=emoji_bytes,
roles=roles,
reason='Very secret business.'
)
@commands.command()
@commands.guild_only()
@mod_or_permissions(kick_members=True)
async def kick(self, ctx, member: MemberConverterr, *, reason: ActionReason = None):
"""Kicks a member from the server.
In order for this to work, the bot must have Kick Member permissions.
To use this command you must have Kick Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.kick(member, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
# @commands.command(name="delprofanity", aliases=["delswears", "delcurses"])
# @commands.guild_only
# @commands.has_permissions(manage_guild=True)
# async def remove_profanity(self, ctx, *words):
# with open("./data/profanity.txt", "r", encoding="utf-8") as f:
# stored = [w.strip() for w in f.readlines()]
#
# with open("./data/profanity.txt", "w", encoding="utf-8") as f:
# f.write("".join([f"{w}\n" for w in stored if w not in words]))
#
# profanity.load_censor_words_from_file("./data/profanity.txt")
# await ctx.send("Action complete.")
# await ctx.send("Action complete.")
@commands.command(aliases=["nick"])
@commands.guild_only()
@commands.has_permissions(manage_nicknames=True)
async def nickname(self, ctx, member: MemberConverterr, *, name: str = None):
""" Nicknames a user from the current server. """
if await checks.check_priv(ctx, member):
return
try:
await member.edit(nick=name, reason=default.responsible(ctx.author, "Changed by command"))
message = f"Changed **{member.name}'s** nickname to **{name}**"
if name is None:
message = f"Reset **{member.name}'s** nickname"
await ctx.send(message)
except Exception as e:
await ctx.send(e)
@commands.command(aliases=["massnick"])
@commands.guild_only()
@commands.has_permissions(manage_nicknames=True)
async def massnickname(self, ctx, *, name: str = None):
""" Nicknames all the users from the current server. """
for member in ctx.guild.members:
if await checks.check_priv(ctx, member):
return
else:
                if member.id in (845186772698923029, 511724576674414600):
continue
else:
try:
await member.edit(nick=name, reason=default.responsible(ctx.author, "Changed by command"))
message = f"Changed **{member.name}'s** nickname to **{name}**"
if name is None:
message = f"Reset **{member.name}'s** nickname"
await ctx.send(message)
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.guild_only()
@mod_or_permissions(ban_members=True)
async def ban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Bans a member from the server.
        You can also ban by ID, regardless of whether the user is
        in the server or not.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.ban(member, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@mod_or_permissions(ban_members=True)
async def multiban(self, ctx, members: commands.Greedy[MemberID], *, reason: ActionReason = None):
"""Bans multiple members from the server.
This only works through banning via ID.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
total_members = len(members)
if total_members == 0:
return await ctx.send('Missing members to ban.')
confirm = await ctx.prompt(f'This will ban **{plural(total_members):member}**. Are you sure?', reacquire=False)
if not confirm:
return await ctx.send('Aborting.')
failed = 0
for member in members:
try:
await ctx.guild.ban(member, reason=reason)
except discord.HTTPException:
failed += 1
await ctx.send(f'Banned {total_members - failed}/{total_members} members.')
@commands.command()
@commands.guild_only()
@mod_or_permissions(kick_members=True)
async def softban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Soft bans a member from the server.
A softban is basically banning the member from the server but
then unbanning the member as well. This allows you to essentially
kick the member while removing their messages.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Kick Members permissions.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.ban(member, reason=reason)
await ctx.guild.unban(member, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@mod_or_permissions(ban_members=True)
async def massban(self, ctx, *, args):
"""Mass bans multiple members from the server.
This command has a powerful "command line" syntax. To use this command
you and the bot must both have Ban Members permission. **Every option is optional.**
        Users are banned **if and only if** all conditions are met.
The following options are valid.
`--channel` or `-c`: Channel to search for message history.
`--reason` or `-r`: The reason for the ban.
`--regex`: Regex that usernames must match.
`--created`: Matches users whose accounts were created less than specified minutes ago.
`--joined`: Matches users that joined less than specified minutes ago.
`--joined-before`: Matches users who joined before the member ID given.
`--joined-after`: Matches users who joined after the member ID given.
`--no-avatar`: Matches users who have no avatar. (no arguments)
`--no-roles`: Matches users that have no role. (no arguments)
`--show`: Show members instead of banning them (no arguments).
Message history filters (Requires `--channel`):
`--contains`: A substring to search for in the message.
`--starts`: A substring to search if the message starts with.
`--ends`: A substring to search if the message ends with.
`--match`: A regex to match the message content to.
`--search`: How many messages to search. Default 100. Max 2000.
`--after`: Messages must come after this message ID.
`--before`: Messages must come before this message ID.
`--files`: Checks if the message has attachments (no arguments).
`--embeds`: Checks if the message has embeds (no arguments).
"""
# For some reason there are cases due to caching that ctx.author
# can be a User even in a guild only context
# Rather than trying to work out the kink with it
# Just upgrade the member itself.
        if not isinstance(ctx.author, discord.Member):
try:
author = await ctx.guild.fetch_member(ctx.author.id)
except discord.HTTPException:
return await ctx.send('Somehow, Discord does not seem to think you are in this server.')
else:
author = ctx.author
parser = Arguments(add_help=False, allow_abbrev=False)
parser.add_argument('--channel', '-c')
parser.add_argument('--reason', '-r')
parser.add_argument('--search', type=int, default=100)
parser.add_argument('--regex')
parser.add_argument('--no-avatar', action='store_true')
parser.add_argument('--no-roles', action='store_true')
parser.add_argument('--created', type=int)
parser.add_argument('--joined', type=int)
parser.add_argument('--joined-before', type=int)
parser.add_argument('--joined-after', type=int)
parser.add_argument('--contains')
parser.add_argument('--starts')
parser.add_argument('--ends')
parser.add_argument('--match')
parser.add_argument('--show', action='store_true')
parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
parser.add_argument('--after', type=int)
parser.add_argument('--before', type=int)
try:
args = parser.parse_args(shlex.split(args))
except Exception as e:
return await ctx.send(str(e))
members = []
if args.channel:
channel = await commands.TextChannelConverter().convert(ctx, args.channel)
before = args.before and discord.Object(id=args.before)
after = args.after and discord.Object(id=args.after)
predicates = []
if args.contains:
predicates.append(lambda m: args.contains in m.content)
if args.starts:
predicates.append(lambda m: m.content.startswith(args.starts))
if args.ends:
predicates.append(lambda m: m.content.endswith(args.ends))
if args.match:
try:
_match = re.compile(args.match)
except re.error as e:
return await ctx.send(f'Invalid regex passed to `--match`: {e}')
else:
predicates.append(lambda m, x=_match: x.match(m.content))
if args.embeds:
predicates.append(args.embeds)
if args.files:
predicates.append(args.files)
async for message in channel.history(limit=min(max(1, args.search), 2000), before=before, after=after):
if all(p(message) for p in predicates):
members.append(message.author)
else:
if ctx.guild.chunked:
members = ctx.guild.members
else:
async with ctx.typing():
await ctx.guild.chunk(cache=True)
members = ctx.guild.members
# member filters
predicates = [
            lambda m: isinstance(m, discord.Member) and can_execute_action(ctx, author, m),  # Only if applicable
lambda m: not m.bot, # No bots
lambda m: m.discriminator != '0000', # No deleted users
]
converter = commands.MemberConverter()
if args.regex:
try:
_regex = re.compile(args.regex)
except re.error as e:
return await ctx.send(f'Invalid regex passed to `--regex`: {e}')
else:
predicates.append(lambda m, x=_regex: x.match(m.name))
if args.no_avatar:
            predicates.append(lambda m: m.avatar is None)
if args.no_roles:
predicates.append(lambda m: len(getattr(m, 'roles', [])) <= 1)
now = discord.utils.utcnow()
if args.created:
def created(member, *, offset=now - datetime.timedelta(minutes=args.created)):
return member.created_at > offset
predicates.append(created)
if args.joined:
def joined(member, *, offset=now - datetime.timedelta(minutes=args.joined)):
if isinstance(member, discord.User):
# If the member is a user then they left already
return True
return member.joined_at and member.joined_at > offset
predicates.append(joined)
if args.joined_after:
_joined_after_member = await converter.convert(ctx, str(args.joined_after))
def joined_after(member, *, _other=_joined_after_member):
return member.joined_at and _other.joined_at and member.joined_at > _other.joined_at
predicates.append(joined_after)
if args.joined_before:
_joined_before_member = await converter.convert(ctx, str(args.joined_before))
def joined_before(member, *, _other=_joined_before_member):
return member.joined_at and _other.joined_at and member.joined_at < _other.joined_at
predicates.append(joined_before)
members = {m for m in members if all(p(m) for p in predicates)}
if len(members) == 0:
return await ctx.send('No members found matching criteria.')
if args.show:
members = sorted(members, key=lambda m: m.joined_at or now)
fmt = "\n".join(f'{m.id}\tJoined: {m.joined_at}\tCreated: {m.created_at}\t{m}' for m in members)
content = f'Current Time: {discord.utils.utcnow()}\nTotal members: {len(members)}\n{fmt}'
file = discord.File(BytesIO(content.encode('utf-8')), filename='members.txt')
return await ctx.send(file=file)
if args.reason is None:
return await ctx.send('--reason flag is required.')
else:
reason = await ActionReason().convert(ctx, args.reason)
confirm = await ctx.prompt(f'This will ban **{plural(len(members)):member}**. Are you sure?')
if not confirm:
return await ctx.send('Aborting.')
count = 0
for member in members:
try:
await ctx.guild.ban(member, reason=reason)
except discord.HTTPException:
pass
else:
count += 1
await ctx.send(f'Banned {count}/{len(members)}')
@commands.command()
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
@commands.has_permissions(ban_members=True)
async def massunban(self, ctx, *members: MemberID):
""" Mass unbans multiple members from the server. """
try:
for member_id in members:
                await ctx.guild.unban(discord.Object(id=int(member_id)))
            await ctx.send(default.actionmessage("massunbanned", mass=True))
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
@commands.has_permissions(kick_members=True)
async def masskick(self, ctx, reason: ActionReason, *members: MemberID):
""" Mass kicks multiple members from the server. """
try:
for member_id in members:
                await ctx.guild.kick(discord.Object(id=int(member_id)), reason=default.responsible(ctx.author, reason))
            await ctx.send(default.actionmessage("masskicked", mass=True))
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, member: MemberID, *, reason: str = None):
""" Unbans a user from the current server. """
try:
            await ctx.guild.unban(discord.Object(id=int(member)), reason=default.responsible(ctx.author, reason))
await ctx.send(default.actionmessage("unbanned"))
except Exception as e:
await ctx.send(e)
@commands.group(invoke_without_command=True)
@can_mute()
async def mute(self, ctx, members: commands.Greedy[discord.Member], *, reason: ActionReason = None):
"""Mutes members using the configured mute role.
The bot must have Manage Roles permission and be
above the muted role in the hierarchy.
To use this command you need to be higher than the
mute role in the hierarchy and have Manage Roles
permission at the server level."""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
guild = ctx.guild
total = len(members)
if total == 0:
return await ctx.warn('Missing members to mute.')
elif total > 20:
return await ctx.error('You may only mute 20 people at a time')
role = discord.utils.get(guild.roles, name='Muted')
failed = 0
em = discord.Embed(colour=invis, description='')
for member in members:
if role not in member.roles:
try:
await member.add_roles(role, reason=reason)
                    em.description += f"{self.bot.icons['greenTick']} {member.name} successfully muted\n"
except discord.HTTPException:
failed += 1
                    em.description += f"{self.bot.icons['RedTick']} {member.name} failed to mute\n"
em.set_footer(text=f'Muted [{total - failed}/{total}]')
await ctx.try_reply(embed=em)
""""# Mute a Member
@commands.command(aliases=['Unmute'])
@commands.has_permissions(manage_roles=True)
@commands.guild_only()
async def unmute(self, ctx, mem: str):
member = getUser(ctx, mem)
if member:
role = utils.find(lambda r: "mute" in r.name.lower(), member.roles)
if role:
roles = member.roles
roles.remove(role)
asyncio.sleep(0.5)
await member.edit(roles=roles)
log.info(f'Unmuted {member}')
e = discord.Embed(color=embedColor(self))
e.set_author(name="\N{SPEAKER} Unmuted " + str(member))
await edit(ctx, embed=e)
else:
await edit(ctx, content="\N{HEAVY EXCLAMATION MARK SYMBOL} Member is not muted", ttl=5)
# SoftBan a Member (ban, delelte messagea and unban)
@commands.command(aliases=['Softban'])
@commands.has_permissions(ban_members=True)
@commands.guild_only()
async def softban(self, ctx, member: str, *, reason: str=None):
Softban a Member(Kick and delete Messages
member = getUser(ctx, member)
if member:
try:
await ctx.guild.ban(member, reason=reason)
await ctx.guild.unban(member)
except discord.Forbidden:
await edit(ctx, content="\N{HEAVY EXCLAMATION MARK SYMBOL} Missing permissions to ban this Member", ttl=5)
except discord.HTTPException:
await edit(ctx, content="\N{HEAVY EXCLAMATION MARK SYMBOL} Something went wrong while trying to ban...", ttl=5)
else:
e = discord.Embed(color=embedColor(self))
e.set_author(icon_url="https://cdn.discordapp.com/attachments/278603491520544768/301087009408024580/273910007857414147.png",
name="Soft Banned: " + str(member))
await edit(ctx, embed=e)"""
@commands.command()
@commands.is_owner()
async def do(self, ctx, times: int, *, command):
"""Repeats a command a specified number of times."""
msg = copy.copy(ctx.message)
msg.content = ctx.prefix + command
new_ctx = await self.bot.get_context(msg, cls=type(ctx))
for i in range(times):
await new_ctx.reinvoke()
#@commands.group(name='mute', invoke_without_command=True)
#@can_mute()
#async def _mute(self, ctx, members: commands.Greedy[MemberConverterr], *, reason: ActionReason = None):
# """Mutes members using the configured mute role.
# The bot must have Manage Roles permission and be
# above the muted role in the hierarchy.
# To use this command you need to be higher than the
# mute role in the hierarchy and have Manage Roles
# permission at the server level.
# """
#
# if reason is None:
# reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
#
# role = next((g for g in ctx.guild.roles if g.name == "Muted"), None)
# total = len(members)
# if total == 0:
# return await ctx.send('Missing members to mute.')
#
# failed = 0
# for member in members:
# try:
# await member.add_roles(role, reason=reason)
# except discord.HTTPException:
# failed += 1
#
# if failed == 0:
# await ctx.send('\N{THUMBS UP SIGN}')
# else:
# await ctx.send(f'Muted [{total - failed}/{total}]')
#
@commands.command(name='unmute')
@can_mute()
async def _unmute(self, ctx, members: commands.Greedy[MemberConverterr], *, reason: ActionReason = None):
"""Unmutes members using the configured mute role.
The bot must have Manage Roles permission and be
above the muted role in the hierarchy.
To use this command you need to be higher than the
mute role in the hierarchy and have Manage Roles
permission at the server level.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
role = next((g for g in ctx.guild.roles if g.name == "Muted"), None)
total = len(members)
if total == 0:
return await ctx.send('Missing members to unmute.')
failed = 0
for member in members:
try:
await member.remove_roles(role, reason=reason)
except discord.HTTPException:
failed += 1
if failed == 0:
await ctx.send('\N{THUMBS UP SIGN}')
else:
await ctx.send(f'Unmuted [{total - failed}/{total}]')
@commands.command(aliases=["ar"])
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def announcerole(self, ctx, *, role: discord.Role):
""" Makes a role mentionable and removes it whenever you mention the role """
if role == ctx.guild.default_role:
return await ctx.warn("To prevent abuse, I won't allow mentionable role for everyone/here role.")
if ctx.author.top_role.position <= role.position:
return await ctx.warn(
"It seems like the role you attempt to mention is over your permissions, therefore I won't allow you.")
if ctx.me.top_role.position <= role.position:
return await ctx.error("This role is above my permissions, I can't make it mentionable ;-;")
await role.edit(mentionable=True, reason=f"[ {ctx.author} ] announcerole command")
msg = await ctx.success(
f"**{role.name}** is now mentionable, if you don't mention it within 30 seconds, I will revert the changes.")
while True:
            def role_checker(m):
                return role.mention in m.content
try:
checker = await self.bot.wait_for("message", timeout=30.0, check=role_checker)
if checker.author.id == ctx.author.id:
await role.edit(mentionable=False, reason=f"[ {ctx.author} ] announcerole command")
return await msg.edit(
content=f"**{role.name}** mentioned by **{ctx.author}** in {checker.channel.mention}")
else:
await checker.delete()
except asyncio.TimeoutError:
await role.edit(mentionable=False, reason=f"[ {ctx.author} ] announcerole command")
return await msg.edit(content=f"**{role.name}** was never mentioned by **{ctx.author}**...")
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def find(self, ctx):
""" Finds a user within your search term """
if ctx.invoked_subcommand is None:
await ctx.send_help(str(ctx.command))
@find.command(name="playing")
async def find_playing(self, ctx, *, search: str):
loop = []
for i in ctx.guild.members:
if i.activities and (not i.bot):
for g in i.activities:
if g.name and (search.lower() in g.name.lower()):
loop.append(f"{i} | {type(g).__name__}: {g.name} ({i.id})")
await default.prettyResults(
ctx, "playing", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="username", aliases=["name"])
async def find_name(self, ctx, *, search: str):
loop = [f"{i} ({i.id})" for i in ctx.guild.members if search.lower() in i.name.lower() and not i.bot]
await default.prettyResults(
ctx, "name", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="nickname", aliases=["nick"])
async def find_nickname(self, ctx, *, search: str):
loop = [f"{i.nick} | {i} ({i.id})" for i in ctx.guild.members if i.nick if
(search.lower() in i.nick.lower()) and not i.bot]
await default.prettyResults(
ctx, "name", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="id")
async def find_id(self, ctx, *, search: int):
loop = [f"{i} | {i} ({i.id})" for i in ctx.guild.members if (str(search) in str(i.id)) and not i.bot]
await default.prettyResults(
ctx, "name", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="discriminator", aliases=["discrim"])
async def find_discriminator(self, ctx, *, search: str):
if not len(search) == 4 or not re.compile("^[0-9]*$").search(search):
return await ctx.send("You must provide exactly 4 digits")
loop = [f"{i} ({i.id})" for i in ctx.guild.members if search == i.discriminator]
await default.prettyResults(
ctx, "discriminator", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def lock(self, ctx):
channel = ctx.channel
overwrite = channel.overwrites_for(ctx.guild.default_role)
        if overwrite.send_messages is False:
embed = discord.Embed(colour=magenta,
description=f"{channel.mention} is already a locked channel")
embed.set_author(name='Invalid usage',
icon_url=picture("Warning"))
            try:
                await ctx.send(embed=embed)
                return
            except discord.HTTPException:
                try:
                    await ctx.author.send(embed=embed)
                    return
                except discord.HTTPException:
                    return
embed = discord.Embed(colour=magenta,
description=f":lock: **Locked channel** {ctx.channel.mention}")
await ctx.send(embed=embed)
await channel.set_permissions(ctx.guild.default_role, send_messages=False)
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def unlock(self, ctx):
channel = ctx.channel
overwrite = channel.overwrites_for(ctx.guild.default_role)
if overwrite.send_messages:
embed = discord.Embed(colour=magenta,
description=f"{channel.mention} is not a locked channel")
embed.set_author(name='Invalid usage',
icon_url=picture("Warning"))
            try:
                await ctx.send(embed=embed)
            except discord.HTTPException:
                try:
                    await ctx.author.send(embed=embed)
                except discord.HTTPException:
                    pass
            return
await channel.set_permissions(ctx.guild.default_role, send_messages=True)
embed = discord.Embed(colour=0xFF004D,
description=f":unlock: **Unlocked channel** {ctx.channel.mention}")
        try:
            await ctx.send(embed=embed)
        except discord.HTTPException:
            try:
                await ctx.author.send(embed=embed)
            except discord.HTTPException:
                pass
@commands.command()
@commands.has_permissions(manage_messages=True)
async def cls(self, ctx, amount: int):
amount2 = amount + 1
await ctx.channel.purge(limit=amount2)
@commands.group(aliases=["purge", "clr", "clear"])
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.guild)
@commands.has_permissions(manage_messages=True)
async def prune(self, ctx):
""" Removes messages from the current server. """
if ctx.invoked_subcommand is None:
await ctx.send_help(str(ctx.command))
async def do_removal(self, ctx, limit, predicate, *, before=None, after=None):
if limit > 2000:
return await ctx.send(f'Too many messages to search given ({limit}/2000)')
if before is None:
before = ctx.message
else:
before = discord.Object(id=before)
if after is not None:
after = discord.Object(id=after)
try:
deleted = await ctx.channel.purge(limit=limit, before=before, after=after, check=predicate)
except discord.Forbidden as e:
return await ctx.send('I do not have permissions to delete messages.')
except discord.HTTPException as e:
return await ctx.send(f'Error: {e} (try a smaller search?)')
spammers = Counter(m.author.display_name for m in deleted)
deleted = len(deleted)
        messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
if deleted:
messages.append('')
spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
messages.extend(f'**{name}**: {count}' for name, count in spammers)
to_send = '\n'.join(messages)
if len(to_send) > 2000:
await ctx.send(f'Successfully removed {deleted} messages.', delete_after=10)
else:
await ctx.send(to_send, delete_after=10)
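    # How the prune subcommands below drive do_removal (a sketch, not new
    # behavior): each one passes a predicate that receives a discord.Message and
    # returns a truthy value when the message should be deleted, e.g. (an
    # illustrative predicate):
    #
    #   await self.do_removal(ctx, 100, lambda m: m.pinned is False)
    #
    # do_removal then runs channel.purge with that check and reports a
    # per-author breakdown of what was removed.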
@prune.command()
async def embeds(self, ctx, search=100):
"""Removes messages that have embeds in them."""
await self.do_removal(ctx, search, lambda e: len(e.embeds))
@prune.command()
async def files(self, ctx, search=100):
"""Removes messages that have attachments in them."""
await self.do_removal(ctx, search, lambda e: len(e.attachments))
@prune.command()
async def mentions(self, ctx, search=100):
"""Removes messages that have mentions in them."""
await self.do_removal(ctx, search, lambda e: len(e.mentions) or len(e.role_mentions))
@prune.command()
async def images(self, ctx, search=100):
"""Removes messages that have embeds or attachments."""
await self.do_removal(ctx, search, lambda e: len(e.embeds) or len(e.attachments))
@prune.command(name="all")
async def _remove_all(self, ctx, search=100):
"""Removes all messages."""
await self.do_removal(ctx, search, lambda e: True)
@prune.command()
async def user(self, ctx, member: MemberConverterr, search=100):
"""Removes all messages by the member."""
await self.do_removal(ctx, search, lambda e: e.author == member)
@prune.command()
async def contains(self, ctx, *, substr: str):
"""Removes all messages containing a substring.
The substring must be at least 3 characters long.
"""
if len(substr) < 3:
await ctx.send("The substring length must be at least 3 characters.")
else:
await self.do_removal(ctx, 100, lambda e: substr in e.content)
@prune.command(name="bot", aliases=['bots'])
async def _bots(self, ctx, prefix, search=100):
"""Removes a bot user's messages and messages with their optional prefix."""
def predicate(m):
return (m.webhook_id is None and m.author.bot) or m.content.startswith(tuple(prefix))
await self.do_removal(ctx, search, predicate)
@prune.command(name="users")
async def _users(self, ctx, search=100):
"""Removes only user messages. """
def predicate(m):
return m.author.bot is False
await self.do_removal(ctx, search, predicate)
@prune.command(name="emojis")
async def _emojis(self, ctx, search=100):
"""Removes all messages containing custom emoji."""
custom_emoji = re.compile(r"<a?:(.*?):(\d{17,21})>|[\u263a-\U0001f645]")
def predicate(m):
return custom_emoji.search(m.content)
await self.do_removal(ctx, search, predicate)
@prune.command(name="reactions")
async def _reactions(self, ctx, search=100):
"""Removes all reactions from messages that have them."""
if search > 2000:
return await ctx.send(f"Too many messages to search for ({search}/2000)")
total_reactions = 0
async for message in ctx.history(limit=search, before=ctx.message):
if len(message.reactions):
total_reactions += sum(r.count for r in message.reactions)
await message.clear_reactions()
await ctx.send(f"Successfully removed {total_reactions} reactions.")
@prune.command()
async def custom(self, ctx, *, args: str):
"""A more advanced purge command.
This command uses a powerful "command line" syntax.
Most options support multiple values to indicate 'any' match.
If the value has spaces it must be quoted.
The messages are only deleted if all options are met unless
the `--or` flag is passed, in which case only if any is met.
The following options are valid.
`--user`: A mention or name of the user to remove.
`--contains`: A substring to search for in the message.
`--starts`: A substring to search if the message starts with.
`--ends`: A substring to search if the message ends with.
`--search`: How many messages to search. Default 100. Max 2000.
`--after`: Messages must come after this message ID.
`--before`: Messages must come before this message ID.
Flag options (no arguments):
`--bot`: Check if it's a bot user.
`--embeds`: Check if the message has embeds.
`--files`: Check if the message has attachments.
`--emoji`: Check if the message has custom emoji.
`--reactions`: Check if the message has reactions
`--or`: Use logical OR for all options.
`--not`: Use logical NOT for all options.
"""
parser = Arguments(add_help=False, allow_abbrev=False)
parser.add_argument('--user', nargs='+')
parser.add_argument('--contains', nargs='+')
parser.add_argument('--starts', nargs='+')
parser.add_argument('--ends', nargs='+')
parser.add_argument('--or', action='store_true', dest='_or')
parser.add_argument('--not', action='store_true', dest='_not')
parser.add_argument('--emoji', action='store_true')
parser.add_argument('--bot', action='store_const', const=lambda m: m.author.bot)
parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
parser.add_argument('--reactions', action='store_const', const=lambda m: len(m.reactions))
parser.add_argument('--search', type=int)
parser.add_argument('--after', type=int)
parser.add_argument('--before', type=int)
try:
args = parser.parse_args(shlex.split(args))
except Exception as e:
await ctx.send(str(e))
return
predicates = []
if args.bot:
predicates.append(args.bot)
if args.embeds:
predicates.append(args.embeds)
if args.files:
predicates.append(args.files)
if args.reactions:
predicates.append(args.reactions)
if args.emoji:
custom_emoji = re.compile(r'<:(\w+):(\d+)>')
predicates.append(lambda m: custom_emoji.search(m.content))
if args.user:
users = []
converter = commands.MemberConverter()
for u in args.user:
try:
user = await converter.convert(ctx, u)
users.append(user)
except Exception as e:
await ctx.send(str(e))
return
predicates.append(lambda m: m.author in users)
if args.contains:
predicates.append(lambda m: any(sub in m.content for sub in args.contains))
if args.starts:
predicates.append(lambda m: any(m.content.startswith(s) for s in args.starts))
if args.ends:
predicates.append(lambda m: any(m.content.endswith(s) for s in args.ends))
op = all if not args._or else any
def predicate(m):
r = op(p(m) for p in predicates)
if args._not:
return not r
return r
if args.after:
if args.search is None:
args.search = 2000
if args.search is None:
args.search = 100
args.search = max(0, min(2000, args.search)) # clamp from 0-2000
await self.do_removal(ctx, args.search, predicate, before=args.before, after=args.after)
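    # Example invocations for the custom purge (the "?" prefix is hypothetical):
    #   ?prune custom --user Spammer --contains "free nitro" --or --search 500
    # deletes, among the last 500 messages, anything sent by "Spammer" OR
    # containing "free nitro"; adding --not would invert the combined predicate.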
# Mute related stuff
async def update_mute_role(self, ctx, config, role, *, merge=False):
guild = ctx.guild
if config and merge:
members = config.muted_members
# If the roles are being merged then the old members should get the new role
reason = f'Action done by {ctx.author} (ID: {ctx.author.id}): Merging mute roles'
async for member in self.bot.resolve_member_ids(guild, members):
if not member._roles.has(role.id):
try:
await member.add_roles(role, reason=reason)
except discord.HTTPException:
pass
else:
members = set()
members.update(map(lambda m: m.id, role.members))
#query = """INSERT INTO guild_mod_config (id, mute_role_id, muted_members)
# VALUES ($1, $2, $3::bigint[]) ON CONFLICT (id)
# DO UPDATE SET
# mute_role_id = EXCLUDED.mute_role_id,
# muted_members = EXCLUDED.muted_members
# """
#await self.bot.pool.execute(query, guild.id, role.id, list(members))
#self.get_guild_config.invalidate(self, guild.id)
def setup(bot):
bot.add_cog(Mod(bot))
| # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. Jason Cameron +
# All rights reserved. +
# This file is part of the edoC discord bot project , +
# and is released under the "MIT License Agreement". Please see the LICENSE +
# file that should have been included as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import argparse
import asyncio
import copy
import datetime
import logging
import re
import shlex
from collections import Counter
from io import BytesIO
import discord
from discord import NotFound, Object
from discord.ext import commands
from discord.ext.commands import Converter, BadArgument
from discord.utils import find
from cogs.Discordinfo import format_relative, plural
# from lib.db import db
from utils import checks, default
from utils.checks import MemberConverterr
from utils.default import mod_or_permissions
from utils.vars import *
log = logging.getLogger('mod')
class Arguments(argparse.ArgumentParser):
def error(self, message):
raise RuntimeError(message)
class BannedUser(Converter):
async def convert(self, ctx, arg):
if ctx.guild.me.guild_permissions.ban_members:
if arg.isdigit():
try:
return (await ctx.guild.fetch_ban(Object(id=int(arg)))).user
except NotFound:
raise BadArgument
banned = [e.user for e in await ctx.guild.bans()]
if banned:
if (user := find(lambda u: str(u) == arg, banned)) is not None:
return user
else:
raise BadArgument
class MemberID(commands.Converter):
async def convert(self, ctx, argument):
try:
m = await commands.MemberConverter().convert(ctx, argument)
except commands.BadArgument:
try:
return int(argument, base=10)
except ValueError:
raise commands.BadArgument(f"{argument} is not a valid member or member ID.") from None
else:
return m.id
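# MemberID accepts either a resolvable member (mention, name, or the ID of a
# member who is still present) or a raw snowflake for users who already left;
# e.g. both "@Someone" and "80088516616269824" (an illustrative ID) convert,
# the latter simply passing the integer through.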
class ActionReason(commands.Converter):
async def convert(self, ctx, argument):
ret = argument
if len(ret) > 512:
            raise commands.BadArgument(f"reason is too long ({len(ret)}/512)")
return ret
BannedUsers = set()
async def Get_Banned_Users(bot):
    bans = bot.db.field("SELECT id FROM users WHERE banned = ?", "True")
    for UserID in bans:
        BannedUsers.add(UserID)
async def BannedU(ctx):
    if ctx.author in BannedUsers:
        print(f"Command by {ctx.author} blocked!")
    async def pred(ctx):
        if ctx.author in BannedUsers:
            return ctx.send("You are banned from using commands")
    return pred
async def BanUser(ctx, userid: MemberID, reason):
    BannedUsers.add(userid)
    ctx.bot.db.execute("INSERT INTO users VALUES (?, ?)", (userid, reason,))
    # db.execute("INSERT INTO users (Reason)", reason)
    ctx.bot.db.commit()
    return await ctx.send(f'{userid} was banned from using the bot')
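# A minimal sketch of how these helpers would be wired together at startup,
# assuming the bot exposes the same `db` attribute used above (the event name
# and call site are illustrative, not taken from this codebase):
#
#   @bot.event
#   async def on_ready():
#       await Get_Banned_Users(bot)
#
# After that, the `ctx.author in BannedUsers` checks become meaningful.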
def can_execute_action(ctx, user, target):
return user.id == ctx.bot.owner_id or \
user == ctx.guild.owner or \
user.top_role > target.top_role
class NoMuteRole(commands.CommandError):
def __init__(self):
super().__init__('This server does not have a mute role set up.')
def can_mute():
async def predicate(ctx):
is_owner = await ctx.bot.is_owner(ctx.author)
if ctx.guild is None:
return False
if not ctx.author.guild_permissions.manage_roles and not is_owner:
return False
        # This will only be used within this cog.
        role = discord.utils.get(ctx.guild.roles, name='Muted')
        if role is None:
            perms = ctx.guild.default_role.permissions
            role = await ctx.guild.create_role(name="Muted", permissions=perms)
        for channel in ctx.guild.text_channels:
            await channel.set_permissions(role, overwrite=discord.PermissionOverwrite(send_messages=False,
                                                                                      add_reactions=False))
        for channel in ctx.guild.voice_channels:
            await channel.set_permissions(role, overwrite=discord.PermissionOverwrite(speak=False))
        return ctx.author.top_role > role
return commands.check(predicate)
class Mod(commands.Cog, description='Moderator go brrrrrrrr ~ban'):
def __init__(self, bot):
self.bot = bot
self.config = bot.config
async def _basic_cleanup_strategy(self, ctx, search):
count = 0
async for msg in ctx.history(limit=search, before=ctx.message):
if msg.author == ctx.me and not (msg.mentions or msg.role_mentions):
await msg.delete()
count += 1
return {'Bot': count}
async def _complex_cleanup_strategy(self, ctx, search):
        prefixes = tuple(self.bot.get_guild_prefixes(ctx.guild))  # startswith needs a tuple; TODO: this may not work correctly yet
def check(m):
return m.author == ctx.me or m.content.startswith(prefixes)
deleted = await ctx.channel.purge(limit=search, check=check, before=ctx.message)
return Counter(m.author.display_name for m in deleted)
async def _regular_user_cleanup_strategy(self, ctx, search):
prefixes = tuple(self.bot.get_guild_prefixes(ctx.guild))
def check(m):
return (m.author == ctx.me or m.content.startswith(prefixes)) and not (m.mentions or m.role_mentions)
deleted = await ctx.channel.purge(limit=search, check=check, before=ctx.message)
return Counter(m.author.display_name for m in deleted)
@commands.command()
async def cleanup(self, ctx, search=100):
"""Cleans up the bot's messages from the channel.
If a search number is specified, it searches that many messages to delete.
If the bot has Manage Messages permissions then it will try to delete
messages that look like they invoked the bot as well.
After the cleanup is completed, the bot will send you a message with
which people got their messages deleted and their count. This is useful
to see which users are spammers.
Members with Manage Messages can search up to 1000 messages.
Members without can search up to 25 messages.
"""
strategy = self._basic_cleanup_strategy
is_mod = ctx.channel.permissions_for(ctx.author).manage_messages
if ctx.channel.permissions_for(ctx.me).manage_messages:
if is_mod:
strategy = self._complex_cleanup_strategy
else:
strategy = self._regular_user_cleanup_strategy
if is_mod:
search = min(max(2, search), 1000)
else:
search = min(max(2, search), 25)
spammers = await strategy(ctx, search)
deleted = sum(spammers.values())
messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
if deleted:
messages.append('')
spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
messages.extend(f'- **{author}**: {count}' for author, count in spammers)
await ctx.send('\n'.join(messages), delete_after=10)
@commands.command(aliases=['newmembers', 'nu'])
@commands.guild_only()
async def newusers(self, ctx, *, count=5):
"""Tells you the newest members of the server.
This is useful to check if any suspicious members have
joined.
The count parameter can only be up to 25.
"""
count = max(min(count, 25), 5)
if not ctx.guild.chunked:
            await ctx.guild.chunk(cache=True)
members = sorted(ctx.guild.members, key=lambda m: m.joined_at, reverse=True)[:count]
e = discord.Embed(title='New Members', colour=green)
for member in members:
body = f'Joined {format_relative(member.joined_at)}\nCreated {format_relative(member.created_at)}'
e.add_field(name=f'{member} (ID: {member.id})', value=body, inline=False)
await ctx.send(embed=e)
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_emojis=True)
async def emoji(self, ctx, emoji: discord.PartialEmoji, *roles: discord.Role):
"""This clones a specified emoji that only specified roles
are allowed to use.
"""
# fetch the emoji asset and read it as bytes.
emoji_bytes = await emoji.read()
# the key parameter here is `roles`, which controls
# what roles are able to use the emoji.
await ctx.guild.create_custom_emoji(
name=emoji.name,
image=emoji_bytes,
roles=roles,
reason='Very secret business.'
)
@commands.command()
@commands.guild_only()
@mod_or_permissions(kick_members=True)
async def kick(self, ctx, member: MemberConverterr, *, reason: ActionReason = None):
"""Kicks a member from the server.
In order for this to work, the bot must have Kick Member permissions.
To use this command you must have Kick Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.kick(member, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
# @commands.command(name="delprofanity", aliases=["delswears", "delcurses"])
# @commands.guild_only
# @commands.has_permissions(manage_guild=True)
# async def remove_profanity(self, ctx, *words):
# with open("./data/profanity.txt", "r", encoding="utf-8") as f:
# stored = [w.strip() for w in f.readlines()]
#
# with open("./data/profanity.txt", "w", encoding="utf-8") as f:
# f.write("".join([f"{w}\n" for w in stored if w not in words]))
#
# profanity.load_censor_words_from_file("./data/profanity.txt")
# await ctx.send("Action complete.")
# await ctx.send("Action complete.")
@commands.command(aliases=["nick"])
@commands.guild_only()
@commands.has_permissions(manage_nicknames=True)
async def nickname(self, ctx, member: MemberConverterr, *, name: str = None):
""" Nicknames a user from the current server. """
if await checks.check_priv(ctx, member):
return
try:
await member.edit(nick=name, reason=default.responsible(ctx.author, "Changed by command"))
message = f"Changed **{member.name}'s** nickname to **{name}**"
if name is None:
message = f"Reset **{member.name}'s** nickname"
await ctx.send(message)
except Exception as e:
await ctx.send(e)
@commands.command(aliases=["massnick"])
@commands.guild_only()
@commands.has_permissions(manage_nicknames=True)
async def massnickname(self, ctx, *, name: str = None):
""" Nicknames all the users from the current server. """
for member in ctx.guild.members:
if await checks.check_priv(ctx, member):
return
else:
                if member.id in (845186772698923029, 511724576674414600):
continue
else:
try:
await member.edit(nick=name, reason=default.responsible(ctx.author, "Changed by command"))
message = f"Changed **{member.name}'s** nickname to **{name}**"
if name is None:
message = f"Reset **{member.name}'s** nickname"
await ctx.send(message)
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.guild_only()
@mod_or_permissions(ban_members=True)
async def ban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Bans a member from the server.
        You can also pass an ID to ban someone regardless of whether they're
        in the server or not.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.ban(member, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@mod_or_permissions(ban_members=True)
async def multiban(self, ctx, members: commands.Greedy[MemberID], *, reason: ActionReason = None):
"""Bans multiple members from the server.
This only works through banning via ID.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
total_members = len(members)
if total_members == 0:
return await ctx.send('Missing members to ban.')
confirm = await ctx.prompt(f'This will ban **{plural(total_members):member}**. Are you sure?', reacquire=False)
if not confirm:
return await ctx.send('Aborting.')
failed = 0
for member in members:
try:
await ctx.guild.ban(member, reason=reason)
except discord.HTTPException:
failed += 1
await ctx.send(f'Banned {total_members - failed}/{total_members} members.')
@commands.command()
@commands.guild_only()
@mod_or_permissions(kick_members=True)
async def softban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Soft bans a member from the server.
A softban is basically banning the member from the server but
then unbanning the member as well. This allows you to essentially
kick the member while removing their messages.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Kick Members permissions.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.ban(member, reason=reason)
await ctx.guild.unban(member, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@mod_or_permissions(ban_members=True)
async def massban(self, ctx, *, args):
"""Mass bans multiple members from the server.
This command has a powerful "command line" syntax. To use this command
you and the bot must both have Ban Members permission. **Every option is optional.**
Users are only banned **if and only if** all conditions are met.
The following options are valid.
`--channel` or `-c`: Channel to search for message history.
`--reason` or `-r`: The reason for the ban.
`--regex`: Regex that usernames must match.
`--created`: Matches users whose accounts were created less than specified minutes ago.
`--joined`: Matches users that joined less than specified minutes ago.
`--joined-before`: Matches users who joined before the member ID given.
`--joined-after`: Matches users who joined after the member ID given.
`--no-avatar`: Matches users who have no avatar. (no arguments)
`--no-roles`: Matches users that have no role. (no arguments)
`--show`: Show members instead of banning them (no arguments).
Message history filters (Requires `--channel`):
`--contains`: A substring to search for in the message.
`--starts`: A substring to search if the message starts with.
`--ends`: A substring to search if the message ends with.
`--match`: A regex to match the message content to.
`--search`: How many messages to search. Default 100. Max 2000.
`--after`: Messages must come after this message ID.
`--before`: Messages must come before this message ID.
`--files`: Checks if the message has attachments (no arguments).
`--embeds`: Checks if the message has embeds (no arguments).
"""
# For some reason there are cases due to caching that ctx.author
# can be a User even in a guild only context
# Rather than trying to work out the kink with it
# Just upgrade the member itself.
        if not isinstance(ctx.author, discord.Member):
try:
author = await ctx.guild.fetch_member(ctx.author.id)
except discord.HTTPException:
return await ctx.send('Somehow, Discord does not seem to think you are in this server.')
else:
author = ctx.author
parser = Arguments(add_help=False, allow_abbrev=False)
parser.add_argument('--channel', '-c')
parser.add_argument('--reason', '-r')
parser.add_argument('--search', type=int, default=100)
parser.add_argument('--regex')
parser.add_argument('--no-avatar', action='store_true')
parser.add_argument('--no-roles', action='store_true')
parser.add_argument('--created', type=int)
parser.add_argument('--joined', type=int)
parser.add_argument('--joined-before', type=int)
parser.add_argument('--joined-after', type=int)
parser.add_argument('--contains')
parser.add_argument('--starts')
parser.add_argument('--ends')
parser.add_argument('--match')
parser.add_argument('--show', action='store_true')
parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
parser.add_argument('--after', type=int)
parser.add_argument('--before', type=int)
try:
args = parser.parse_args(shlex.split(args))
except Exception as e:
return await ctx.send(str(e))
members = []
if args.channel:
channel = await commands.TextChannelConverter().convert(ctx, args.channel)
before = args.before and discord.Object(id=args.before)
after = args.after and discord.Object(id=args.after)
predicates = []
if args.contains:
predicates.append(lambda m: args.contains in m.content)
if args.starts:
predicates.append(lambda m: m.content.startswith(args.starts))
if args.ends:
predicates.append(lambda m: m.content.endswith(args.ends))
if args.match:
try:
_match = re.compile(args.match)
except re.error as e:
return await ctx.send(f'Invalid regex passed to `--match`: {e}')
else:
predicates.append(lambda m, x=_match: x.match(m.content))
if args.embeds:
predicates.append(args.embeds)
if args.files:
predicates.append(args.files)
async for message in channel.history(limit=min(max(1, args.search), 2000), before=before, after=after):
if all(p(message) for p in predicates):
members.append(message.author)
else:
if ctx.guild.chunked:
members = ctx.guild.members
else:
async with ctx.typing():
await ctx.guild.chunk(cache=True)
members = ctx.guild.members
# member filters
predicates = [
            lambda m: isinstance(m, discord.Member) and can_execute_action(ctx, author, m),  # Only if applicable
lambda m: not m.bot, # No bots
lambda m: m.discriminator != '0000', # No deleted users
]
converter = commands.MemberConverter()
if args.regex:
try:
_regex = re.compile(args.regex)
except re.error as e:
return await ctx.send(f'Invalid regex passed to `--regex`: {e}')
else:
predicates.append(lambda m, x=_regex: x.match(m.name))
if args.no_avatar:
predicates.append(lambda m: m.avatar == m.default_avatar)
if args.no_roles:
predicates.append(lambda m: len(getattr(m, 'roles', [])) <= 1)
now = discord.utils.utcnow()
if args.created:
def created(member, *, offset=now - datetime.timedelta(minutes=args.created)):
return member.created_at > offset
predicates.append(created)
if args.joined:
def joined(member, *, offset=now - datetime.timedelta(minutes=args.joined)):
if isinstance(member, discord.User):
# If the member is a user then they left already
return True
return member.joined_at and member.joined_at > offset
predicates.append(joined)
if args.joined_after:
_joined_after_member = await converter.convert(ctx, str(args.joined_after))
def joined_after(member, *, _other=_joined_after_member):
return member.joined_at and _other.joined_at and member.joined_at > _other.joined_at
predicates.append(joined_after)
if args.joined_before:
_joined_before_member = await converter.convert(ctx, str(args.joined_before))
def joined_before(member, *, _other=_joined_before_member):
return member.joined_at and _other.joined_at and member.joined_at < _other.joined_at
predicates.append(joined_before)
members = {m for m in members if all(p(m) for p in predicates)}
if len(members) == 0:
return await ctx.send('No members found matching criteria.')
if args.show:
members = sorted(members, key=lambda m: m.joined_at or now)
fmt = "\n".join(f'{m.id}\tJoined: {m.joined_at}\tCreated: {m.created_at}\t{m}' for m in members)
content = f'Current Time: {discord.utils.utcnow()}\nTotal members: {len(members)}\n{fmt}'
file = discord.File(BytesIO(content.encode('utf-8')), filename='members.txt')
return await ctx.send(file=file)
if args.reason is None:
return await ctx.send('--reason flag is required.')
else:
reason = await ActionReason().convert(ctx, args.reason)
confirm = await ctx.prompt(f'This will ban **{plural(len(members)):member}**. Are you sure?')
if not confirm:
return await ctx.send('Aborting.')
count = 0
for member in members:
try:
await ctx.guild.ban(member, reason=reason)
except discord.HTTPException:
pass
else:
count += 1
await ctx.send(f'Banned {count}/{len(members)}')
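    # Example invocation (hypothetical prefix and values):
    #   ?massban --channel #general --contains "free nitro" --search 500 --reason "ad spam"
    # collects authors of matching messages in #general, filters out bots,
    # privileged members, and deleted accounts, then prompts before banning.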
@commands.command()
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
@commands.has_permissions(ban_members=True)
async def massunban(self, ctx, *members: MemberID):
""" Mass unbans multiple members from the server. """
try:
for member_id in members:
                await ctx.guild.unban(discord.Object(id=int(member_id)))
await ctx.send(default.actionmessage("massunbans", mass=True))
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
@commands.has_permissions(kick_members=True)
async def masskick(self, ctx, reason: ActionReason, *members: MemberID):
""" Mass kicks multiple members from the server. """
try:
for member_id in members:
                await ctx.guild.kick(discord.Object(id=int(member_id)), reason=default.responsible(ctx.author, reason))
            await ctx.send(default.actionmessage("masskicked", mass=True))
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, member: MemberID, *, reason: str = None):
""" Unbans a user from the current server. """
try:
            await ctx.guild.unban(discord.Object(id=int(member)), reason=default.responsible(ctx.author, reason))
await ctx.send(default.actionmessage("unbanned"))
except Exception as e:
await ctx.send(e)
@commands.group(invoke_without_command=True)
@can_mute()
async def mute(self, ctx, members: commands.Greedy[discord.Member], *, reason: ActionReason = None):
"""Mutes members using the configured mute role.
The bot must have Manage Roles permission and be
above the muted role in the hierarchy.
To use this command you need to be higher than the
mute role in the hierarchy and have Manage Roles
permission at the server level."""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
guild = ctx.guild
total = len(members)
if total == 0:
return await ctx.warn('Missing members to mute.')
elif total > 20:
return await ctx.error('You may only mute 20 people at a time')
role = discord.utils.get(guild.roles, name='Muted')
failed = 0
em = discord.Embed(colour=invis, description='')
        for member in members:
            if role not in member.roles:
                try:
                    await member.add_roles(role, reason=reason)
                    em.description += f'{self.bot.icons["greenTick"]} {member.name} successfully muted\n'
                except discord.HTTPException:
                    failed += 1
                    em.description += f'{self.bot.icons["RedTick"]} {member.name} failed to mute\n'
        em.set_footer(text=f'Muted [{total - failed}/{total}]')
await ctx.try_reply(embed=em)
""""# Mute a Member
@commands.command(aliases=['Unmute'])
@commands.has_permissions(manage_roles=True)
@commands.guild_only()
async def unmute(self, ctx, mem: str):
member = getUser(ctx, mem)
if member:
role = utils.find(lambda r: "mute" in r.name.lower(), member.roles)
if role:
roles = member.roles
roles.remove(role)
asyncio.sleep(0.5)
await member.edit(roles=roles)
log.info(f'Unmuted {member}')
e = discord.Embed(color=embedColor(self))
e.set_author(name="\N{SPEAKER} Unmuted " + str(member))
await edit(ctx, embed=e)
else:
await edit(ctx, content="\N{HEAVY EXCLAMATION MARK SYMBOL} Member is not muted", ttl=5)
# SoftBan a Member (ban, delelte messagea and unban)
@commands.command(aliases=['Softban'])
@commands.has_permissions(ban_members=True)
@commands.guild_only()
async def softban(self, ctx, member: str, *, reason: str=None):
Softban a Member(Kick and delete Messages
member = getUser(ctx, member)
if member:
try:
await ctx.guild.ban(member, reason=reason)
await ctx.guild.unban(member)
except discord.Forbidden:
await edit(ctx, content="\N{HEAVY EXCLAMATION MARK SYMBOL} Missing permissions to ban this Member", ttl=5)
except discord.HTTPException:
await edit(ctx, content="\N{HEAVY EXCLAMATION MARK SYMBOL} Something went wrong while trying to ban...", ttl=5)
else:
e = discord.Embed(color=embedColor(self))
e.set_author(icon_url="https://cdn.discordapp.com/attachments/278603491520544768/301087009408024580/273910007857414147.png",
name="Soft Banned: " + str(member))
await edit(ctx, embed=e)"""
@commands.command()
@commands.is_owner()
async def do(self, ctx, times: int, *, command):
"""Repeats a command a specified number of times."""
msg = copy.copy(ctx.message)
msg.content = ctx.prefix + command
new_ctx = await self.bot.get_context(msg, cls=type(ctx))
for i in range(times):
await new_ctx.reinvoke()
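    # Example (hypothetical prefix and command): `?do 3 say hi` re-invokes the
    # `say hi` command three times with the original author's permissions,
    # assuming a `say` command exists on the bot.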
#@commands.group(name='mute', invoke_without_command=True)
#@can_mute()
#async def _mute(self, ctx, members: commands.Greedy[MemberConverterr], *, reason: ActionReason = None):
# """Mutes members using the configured mute role.
# The bot must have Manage Roles permission and be
# above the muted role in the hierarchy.
# To use this command you need to be higher than the
# mute role in the hierarchy and have Manage Roles
# permission at the server level.
# """
#
# if reason is None:
# reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
#
# role = next((g for g in ctx.guild.roles if g.name == "Muted"), None)
# total = len(members)
# if total == 0:
# return await ctx.send('Missing members to mute.')
#
# failed = 0
# for member in members:
# try:
# await member.add_roles(role, reason=reason)
# except discord.HTTPException:
# failed += 1
#
# if failed == 0:
# await ctx.send('\N{THUMBS UP SIGN}')
# else:
# await ctx.send(f'Muted [{total - failed}/{total}]')
#
@commands.command(name='unmute')
@can_mute()
async def _unmute(self, ctx, members: commands.Greedy[MemberConverterr], *, reason: ActionReason = None):
"""Unmutes members using the configured mute role.
The bot must have Manage Roles permission and be
above the muted role in the hierarchy.
To use this command you need to be higher than the
mute role in the hierarchy and have Manage Roles
permission at the server level.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
role = next((g for g in ctx.guild.roles if g.name == "Muted"), None)
total = len(members)
if total == 0:
return await ctx.send('Missing members to unmute.')
failed = 0
for member in members:
try:
await member.remove_roles(role, reason=reason)
except discord.HTTPException:
failed += 1
if failed == 0:
await ctx.send('\N{THUMBS UP SIGN}')
else:
await ctx.send(f'Unmuted [{total - failed}/{total}]')
@commands.command(aliases=["ar"])
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def announcerole(self, ctx, *, role: discord.Role):
""" Makes a role mentionable and removes it whenever you mention the role """
if role == ctx.guild.default_role:
return await ctx.warn("To prevent abuse, I won't allow mentionable role for everyone/here role.")
if ctx.author.top_role.position <= role.position:
            return await ctx.warn(
                "The role you're trying to mention is above your top role, so I won't allow that.")
if ctx.me.top_role.position <= role.position:
return await ctx.error("This role is above my permissions, I can't make it mentionable ;-;")
await role.edit(mentionable=True, reason=f"[ {ctx.author} ] announcerole command")
msg = await ctx.success(
f"**{role.name}** is now mentionable, if you don't mention it within 30 seconds, I will revert the changes.")
while True:
def role_checker(m):
if role.mention in m.content:
return True
return False
try:
checker = await self.bot.wait_for("message", timeout=30.0, check=role_checker)
if checker.author.id == ctx.author.id:
await role.edit(mentionable=False, reason=f"[ {ctx.author} ] announcerole command")
return await msg.edit(
content=f"**{role.name}** mentioned by **{ctx.author}** in {checker.channel.mention}")
else:
await checker.delete()
except asyncio.TimeoutError:
await role.edit(mentionable=False, reason=f"[ {ctx.author} ] announcerole command")
return await msg.edit(content=f"**{role.name}** was never mentioned by **{ctx.author}**...")
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def find(self, ctx):
""" Finds a user within your search term """
if ctx.invoked_subcommand is None:
await ctx.send_help(str(ctx.command))
@find.command(name="playing")
async def find_playing(self, ctx, *, search: str):
loop = []
for i in ctx.guild.members:
if i.activities and (not i.bot):
for g in i.activities:
if g.name and (search.lower() in g.name.lower()):
loop.append(f"{i} | {type(g).__name__}: {g.name} ({i.id})")
await default.prettyResults(
ctx, "playing", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="username", aliases=["name"])
async def find_name(self, ctx, *, search: str):
loop = [f"{i} ({i.id})" for i in ctx.guild.members if search.lower() in i.name.lower() and not i.bot]
await default.prettyResults(
ctx, "name", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="nickname", aliases=["nick"])
async def find_nickname(self, ctx, *, search: str):
loop = [f"{i.nick} | {i} ({i.id})" for i in ctx.guild.members if i.nick if
(search.lower() in i.nick.lower()) and not i.bot]
await default.prettyResults(
ctx, "name", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="id")
async def find_id(self, ctx, *, search: int):
loop = [f"{i} | {i} ({i.id})" for i in ctx.guild.members if (str(search) in str(i.id)) and not i.bot]
await default.prettyResults(
ctx, "name", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@find.command(name="discriminator", aliases=["discrim"])
async def find_discriminator(self, ctx, *, search: str):
        if len(search) != 4 or not re.fullmatch(r"[0-9]+", search):
return await ctx.send("You must provide exactly 4 digits")
loop = [f"{i} ({i.id})" for i in ctx.guild.members if search == i.discriminator]
await default.prettyResults(
ctx, "discriminator", f"Found **{len(loop)}** on your search for **{search}**", loop
)
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def lock(self, ctx):
channel = ctx.channel
overwrite = channel.overwrites_for(ctx.guild.default_role)
if not overwrite.send_messages:
embed = discord.Embed(colour=magenta,
description=f"{channel.mention} is already a locked channel")
embed.set_author(name='Invalid usage',
icon_url=picture("Warning"))
            try:
                await ctx.send(embed=embed)
            except discord.HTTPException:
                try:
                    await ctx.author.send(embed=embed)
                except discord.HTTPException:
                    pass
            return
embed = discord.Embed(colour=magenta,
description=f":lock: **Locked channel** {ctx.channel.mention}")
await ctx.send(embed=embed)
await channel.set_permissions(ctx.guild.default_role, send_messages=False)
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def unlock(self, ctx):
channel = ctx.channel
overwrite = channel.overwrites_for(ctx.guild.default_role)
if overwrite.send_messages:
embed = discord.Embed(colour=magenta,
description=f"{channel.mention} is not a locked channel")
embed.set_author(name='Invalid usage',
icon_url=picture("Warning"))
            try:
                await ctx.send(embed=embed)
            except discord.HTTPException:
                try:
                    await ctx.author.send(embed=embed)
                except discord.HTTPException:
                    pass
            return
await channel.set_permissions(ctx.guild.default_role, send_messages=True)
embed = discord.Embed(colour=0xFF004D,
description=f":unlock: **Unlocked channel** {ctx.channel.mention}")
        try:
            await ctx.send(embed=embed)
        except discord.HTTPException:
            try:
                await ctx.author.send(embed=embed)
            except discord.HTTPException:
                pass
@commands.command()
@commands.has_permissions(manage_messages=True)
async def cls(self, ctx, amount: int):
amount2 = amount + 1
await ctx.channel.purge(limit=amount2)
@commands.group(aliases=["purge", "clr", "clear"])
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.guild)
@commands.has_permissions(manage_messages=True)
async def prune(self, ctx):
""" Removes messages from the current server. """
if ctx.invoked_subcommand is None:
await ctx.send_help(str(ctx.command))
async def do_removal(self, ctx, limit, predicate, *, before=None, after=None):
if limit > 2000:
return await ctx.send(f'Too many messages to search given ({limit}/2000)')
if before is None:
before = ctx.message
else:
before = discord.Object(id=before)
if after is not None:
after = discord.Object(id=after)
try:
deleted = await ctx.channel.purge(limit=limit, before=before, after=after, check=predicate)
except discord.Forbidden as e:
return await ctx.send('I do not have permissions to delete messages.')
except discord.HTTPException as e:
return await ctx.send(f'Error: {e} (try a smaller search?)')
spammers = Counter(m.author.display_name for m in deleted)
deleted = len(deleted)
messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
if deleted:
messages.append('')
spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
messages.extend(f'**{name}**: {count}' for name, count in spammers)
to_send = '\n'.join(messages)
if len(to_send) > 2000:
await ctx.send(f'Successfully removed {deleted} messages.', delete_after=10)
else:
await ctx.send(to_send, delete_after=10)
@prune.command()
async def embeds(self, ctx, search=100):
"""Removes messages that have embeds in them."""
await self.do_removal(ctx, search, lambda e: len(e.embeds))
@prune.command()
async def files(self, ctx, search=100):
"""Removes messages that have attachments in them."""
await self.do_removal(ctx, search, lambda e: len(e.attachments))
@prune.command()
async def mentions(self, ctx, search=100):
"""Removes messages that have mentions in them."""
await self.do_removal(ctx, search, lambda e: len(e.mentions) or len(e.role_mentions))
@prune.command()
async def images(self, ctx, search=100):
"""Removes messages that have embeds or attachments."""
await self.do_removal(ctx, search, lambda e: len(e.embeds) or len(e.attachments))
@prune.command(name="all")
async def _remove_all(self, ctx, search=100):
"""Removes all messages."""
await self.do_removal(ctx, search, lambda e: True)
@prune.command()
async def user(self, ctx, member: MemberConverterr, search=100):
"""Removes all messages by the member."""
await self.do_removal(ctx, search, lambda e: e.author == member)
@prune.command()
async def contains(self, ctx, *, substr: str):
"""Removes all messages containing a substring.
The substring must be at least 3 characters long.
"""
if len(substr) < 3:
await ctx.send("The substring length must be at least 3 characters.")
else:
await self.do_removal(ctx, 100, lambda e: substr in e.content)
@prune.command(name="bot", aliases=['bots'])
async def _bots(self, ctx, prefix, search=100):
"""Removes a bot user's messages and messages with their optional prefix."""
def predicate(m):
return (m.webhook_id is None and m.author.bot) or m.content.startswith(tuple(prefix))
await self.do_removal(ctx, search, predicate)
@prune.command(name="users")
async def _users(self, ctx, search=100):
"""Removes only user messages. """
def predicate(m):
return m.author.bot is False
await self.do_removal(ctx, search, predicate)
@prune.command(name="emojis")
async def _emojis(self, ctx, search=100):
"""Removes all messages containing custom emoji."""
custom_emoji = re.compile(r"<a?:(.*?):(\d{17,21})>|[\u263a-\U0001f645]")
def predicate(m):
return custom_emoji.search(m.content)
await self.do_removal(ctx, search, predicate)
@prune.command(name="reactions")
async def _reactions(self, ctx, search=100):
"""Removes all reactions from messages that have them."""
if search > 2000:
return await ctx.send(f"Too many messages to search for ({search}/2000)")
total_reactions = 0
async for message in ctx.history(limit=search, before=ctx.message):
if len(message.reactions):
total_reactions += sum(r.count for r in message.reactions)
await message.clear_reactions()
await ctx.send(f"Successfully removed {total_reactions} reactions.")
@prune.command()
async def custom(self, ctx, *, args: str):
"""A more advanced purge command.
This command uses a powerful "command line" syntax.
Most options support multiple values to indicate 'any' match.
If the value has spaces it must be quoted.
The messages are only deleted if all options are met unless
the `--or` flag is passed, in which case only if any is met.
The following options are valid.
`--user`: A mention or name of the user to remove.
`--contains`: A substring to search for in the message.
`--starts`: A substring to search if the message starts with.
`--ends`: A substring to search if the message ends with.
`--search`: How many messages to search. Default 100. Max 2000.
`--after`: Messages must come after this message ID.
`--before`: Messages must come before this message ID.
Flag options (no arguments):
`--bot`: Check if it's a bot user.
`--embeds`: Check if the message has embeds.
`--files`: Check if the message has attachments.
`--emoji`: Check if the message has custom emoji.
`--reactions`: Check if the message has reactions
`--or`: Use logical OR for all options.
`--not`: Use logical NOT for all options.
"""
parser = Arguments(add_help=False, allow_abbrev=False)
parser.add_argument('--user', nargs='+')
parser.add_argument('--contains', nargs='+')
parser.add_argument('--starts', nargs='+')
parser.add_argument('--ends', nargs='+')
parser.add_argument('--or', action='store_true', dest='_or')
parser.add_argument('--not', action='store_true', dest='_not')
parser.add_argument('--emoji', action='store_true')
parser.add_argument('--bot', action='store_const', const=lambda m: m.author.bot)
parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
parser.add_argument('--reactions', action='store_const', const=lambda m: len(m.reactions))
parser.add_argument('--search', type=int)
parser.add_argument('--after', type=int)
parser.add_argument('--before', type=int)
try:
args = parser.parse_args(shlex.split(args))
except Exception as e:
await ctx.send(str(e))
return
predicates = []
if args.bot:
predicates.append(args.bot)
if args.embeds:
predicates.append(args.embeds)
if args.files:
predicates.append(args.files)
if args.reactions:
predicates.append(args.reactions)
if args.emoji:
custom_emoji = re.compile(r'<:(\w+):(\d+)>')
predicates.append(lambda m: custom_emoji.search(m.content))
if args.user:
users = []
converter = commands.MemberConverter()
for u in args.user:
try:
user = await converter.convert(ctx, u)
users.append(user)
except Exception as e:
await ctx.send(str(e))
return
predicates.append(lambda m: m.author in users)
if args.contains:
predicates.append(lambda m: any(sub in m.content for sub in args.contains))
if args.starts:
predicates.append(lambda m: any(m.content.startswith(s) for s in args.starts))
if args.ends:
predicates.append(lambda m: any(m.content.endswith(s) for s in args.ends))
op = all if not args._or else any
def predicate(m):
r = op(p(m) for p in predicates)
if args._not:
return not r
return r
if args.after:
if args.search is None:
args.search = 2000
if args.search is None:
args.search = 100
args.search = max(0, min(2000, args.search)) # clamp from 0-2000
await self.do_removal(ctx, args.search, predicate, before=args.before, after=args.after)
# Mute related stuff
async def update_mute_role(self, ctx, config, role, *, merge=False):
guild = ctx.guild
if config and merge:
members = config.muted_members
# If the roles are being merged then the old members should get the new role
reason = f'Action done by {ctx.author} (ID: {ctx.author.id}): Merging mute roles'
async for member in self.bot.resolve_member_ids(guild, members):
if not member._roles.has(role.id):
try:
await member.add_roles(role, reason=reason)
except discord.HTTPException:
pass
else:
members = set()
members.update(map(lambda m: m.id, role.members))
#query = """INSERT INTO guild_mod_config (id, mute_role_id, muted_members)
# VALUES ($1, $2, $3::bigint[]) ON CONFLICT (id)
# DO UPDATE SET
# mute_role_id = EXCLUDED.mute_role_id,
# muted_members = EXCLUDED.muted_members
# """
#await self.bot.pool.execute(query, guild.id, role.id, list(members))
#self.get_guild_config.invalidate(self, guild.id)
def setup(bot):
bot.add_cog(Mod(bot))
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import zipfile
import tarfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.interpreterbase import typed_pos_args, InvalidArguments
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux, git
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException, OptionKey
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.scripts import destdir_join
from mesonbuild.mtest import TAPParser, TestResult
from mesonbuild.wrap.wrap import PackageDefinition, WrapException
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
if T.TYPE_CHECKING:
from mesonbuild.compilers import Compiler
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
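# Usage sketch for chdir (the path is illustrative):
#
#   with chdir('/tmp'):
#       ...  # os.getcwd() == '/tmp' here
#   # the previous working directory is restored afterwards, even on exceptions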
def get_dynamic_section_entry(fname: str, entry: str) -> T.Optional[str]:
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return str(m.group(1))
return None # The file did not contain the specified entry.
def get_soname(fname: str) -> T.Optional[str]:
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname: str) -> T.Optional[str]:
raw = get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
# Get both '' and None here
if not raw:
return None
# nix/nixos adds a bunch of stuff to the rpath out of necessity that we
# don't check for, so clear those
final = ':'.join([e for e in raw.split(':') if not e.startswith('/nix')])
return final
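# For reference, get_dynamic_section_entry() matches `readelf -d` lines such as
# (an illustrative excerpt):
#
#   0x000000000000000f (RPATH)   Library rpath: [$ORIGIN/../lib]
#
# from which get_rpath() would return '$ORIGIN/../lib', minus any /nix entries.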
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def _git_init(project_dir):
# If a user has git configuration init.defaultBranch set we want to override that
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
subprocess.check_call(['git', 'init'] + extra_cmd, cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
_git_add_all(project_dir)
def _git_add_all(project_dir):
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
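# Usage sketch (the executable name is illustrative):
#
#   @skipIfNoExecutable('ninja')
#   def test_something(self):
#       ...
#
# The wrapped test raises unittest.SkipTest when 'ninja' is not on PATH.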
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
key = OptionKey(feature)
if key not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
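# Usage sketch for temp_filename:
#
#   with temp_filename() as fname:
#       with open(fname, 'w') as f:
#           f.write('data')
#   # the file has been removed at this point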
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
    def new_which(cmd, *args):
        if cmd == 'pkg-config':
            return None
        return old_which(cmd, *args)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
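# A hedged usage sketch for no_pkgconfig(): inside the context both lookup
# paths report pkg-config as missing; on exit the real shutil.which and
# ExternalProgram._search are restored.
def _example_no_pkgconfig_usage():
    with no_pkgconfig():
        assert shutil.which('pkg-config') is None
    # Outside the context, lookups behave normally again.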
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
a = cc.compiler_args(['-I.'])
        # First, check that the tree construction deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
        # Think of the following as an assertion; we cannot actually check it,
        # because doing so would already flush the pending changes in the CompilerArgs:
        # assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
        # Then check that, once the CompilerArgs have built their container list, deduplication keeps the correct copy
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
# check include order is kept when deduplicating
a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
a += ['-Ifirst']
self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class_clike(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
        # -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
        # Cannot be used as a context manager because we need to
        # open it a second time, and this is not possible on
        # Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
        # Test find_library() by mocking up OpenBSD
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
        # This is the priority-ordered list of patterns used when searching for libraries
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.options[OptionKey('link_args', lang='c')] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[-1] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[-1] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[-1] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
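    # A hedged illustration of the RPM-style Version semantics exercised
    # above; each comparison mirrors a row of the table and is not meant as
    # an exhaustive specification. Prefixed with '_' so it is not collected.
    def _example_version_semantics(self):
        assert Version('2.001') == Version('2.1')    # leading zeros are ignored
        assert Version('1.0010') > Version('1.9')    # numeric, not lexical, ordering
        assert Version('6.0.rc1') > Version('6.0')   # an extra segment sorts later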
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
        # TODO: ICL doesn't set this in the VS2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
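    # A hedged sketch of the round-trip property asserted above: for entries
    # flagged roundtrip=True, join_args(split_args(cmd)) reproduces cmd
    # byte-for-byte. The sample command is taken from the tables above.
    def _example_split_join_roundtrip(self):
        cmd = r'"a b c" d e' if is_windows() else r"'a b c' d e"
        parts = mesonbuild.mesonlib.split_args(cmd)
        self.assertEqual(mesonbuild.mesonlib.join_args(parts), cmd)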
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
                # line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(sorted(deps), sorted(expdeps))
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
env.scratch_dir = tmpdir
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
        errors = []  # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
def test_typed_pos_args_types(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], int)
self.assertIsInstance(args[2], bool)
_(None, mock.Mock(), ['string', 1, False], None)
def test_typed_pos_args_types_invalid(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1.0, False], None)
self.assertEqual(str(cm.exception), 'foo argument 2 was of type "float" but should have been "int"')
def test_typed_pos_args_types_wrong_number(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 2.')
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1, True, True], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 4.')
def test_typed_pos_args_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_varargs_not_given(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertEqual(args[1], [])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_varargs_invalid(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been "str"')
    def test_typed_pos_args_varargs_invalid_multiple_types(self) -> None:
@typed_pos_args('foo', str, varargs=(str, list))
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been one of: "str", "list"')
def test_typed_pos_args_max_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=5)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=1)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args'], None)
self.assertEqual(str(cm.exception), 'foo takes between 1 and 2 arguments, but got 3.')
def test_typed_pos_args_min_varargs(self) -> None:
@typed_pos_args('foo', varargs=str, max_varargs=2, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], list)
self.assertIsInstance(args[0][0], str)
self.assertIsInstance(args[0][1], str)
_(None, mock.Mock(), ['string', 'var'], None)
def test_typed_pos_args_min_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_and_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 'bar'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 4.')
def test_typed_pos_args_min_and_max_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 1.')
def test_typed_pos_args_variadic_and_optional(self) -> None:
@typed_pos_args('foo', str, optargs=[str], varargs=str, min_varargs=0)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(AssertionError) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(
str(cm.exception),
'varargs and optargs not supported together as this would be ambiguous')
def test_typed_pos_args_min_optargs_not_met(self) -> None:
@typed_pos_args('foo', str, str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_optargs_max_exceeded(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', '1', '2'], None)
self.assertEqual(str(cm.exception), 'foo takes at most 2 arguments, but got 3.')
def test_typed_pos_args_optargs_not_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsNone(args[1])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_optargs_some_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str, int])
def _(obj, node, args: T.Tuple[str, T.Optional[str], T.Optional[int]], kwargs) -> None:
self.assertEqual(len(args), 3)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
self.assertEqual(args[1], '1')
self.assertIsNone(args[2])
_(None, mock.Mock(), ['string', '1'], None)
def test_typed_pos_args_optargs_all_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
_(None, mock.Mock(), ['string', '1'], None)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options():
self.assertIn(str(opt), md)
for opt in comp.base_options:
self.assertIn(str(opt), md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS],
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE],
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_option(OptionKey('buildtype'), buildtype)
self.assertEqual(env.coredata.options[OptionKey('buildtype')].value, buildtype)
self.assertEqual(env.coredata.options[OptionKey('optimization')].value, opt)
self.assertEqual(env.coredata.options[OptionKey('debug')].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
def test_all_functions_defined_in_ast_interpreter(self):
'''
        Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
        data_files = []  # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
            # VS doesn't have stable output when no changes are done.
            # The Xcode backend is untested with unit tests; help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
encoding='utf-8',
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
# Guard against override_envvars=None, which patch.dict cannot handle
with mock.patch.dict(os.environ, override_envvars or {}):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
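# Make sure the backend will notice the upcoming configuration change
# (e.g. backends that rely on file timestamps need the mtimes to differ).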
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# substitute them back in as the command for each entry in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
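# The build output ends with a trailing newline, so split('\n')[-1] is
# an empty string; [-2] is the last real line of output.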
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
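# Map an output filename back to its Meson target name by dropping the
# extension and any 'lib'/'cyg' prefix (as used on *nix and Cygwin).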
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
self.init(testdir, default_args=False, inprocess=True)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
break
else:
self.fail('Did not find option "prefix"')
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
confdata = ConfigurationData()
# Keys are missing, so the variables do not exist
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Handles meson format exceptions
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# Dict value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '164 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir)
self.run_target('check_exists')
self.run_target('check-env')
self.run_target('check-env-ct')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
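# Introspection does not guarantee target order; flip the list if
# needed so that the static library comes first.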
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
'new_directory': 'share/new_directory',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '141 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
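# As in test_install_introspection, normalize the introspected target
# order before indexing into it.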
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
def read_logs():
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
return list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
logged = read_logs()
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
# Verify that with --dry-run we obtain the same logs but with nothing
# actually installed
windows_proof_rmtree(self.installdir)
self._run(self.meson_command + ['install', '--dry-run', '--destdir', self.installdir], workdir=self.builddir)
self.assertEqual(logged, read_logs())
self.assertFalse(os.path.exists(self.installdir))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_nopromote(self):
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--wrap-mode=nopromote'])
self.assertIn('Dependency "subsub" not found', cm.exception.stdout)
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt'), encoding='utf-8') as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
# Setup that does not define a wrapper works with --wrapper
self._run(self.mtest_command + ['--setup=timeout', '--wrapper', shutil.which('valgrind')])
# Setup that skips test works
self._run(self.mtest_command + ['--setup=good'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
exclude_suites_log = f.read()
self.assertFalse('buggy' in exclude_suites_log)
# --suite overrides add_test_setup(exclude_suites)
self._run(self.mtest_command + ['--setup=good', '--suite', 'buggy'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
include_suites_log = f.read()
self.assertTrue('buggy' in include_suites_log)
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
default_log = f.read()
# Run tests explicitly using the same setup that is set as the default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt'), encoding='utf-8') as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt'), encoding='utf-8') as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
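# meson test exits with the number of failed tests, so a failing run
# surfaces as CalledProcessError with returncode == failure_count.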
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '130 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '131 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
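# Strip the build dir prefix (plus the path separator) to get the
# private dir as a path relative to the build dir.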
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl. This can be either an
# ld-like linker or a link.exe-like linker (usually the
# former for msys2, the latter otherwise)
self.assertIsInstance(cc.linker, (mesonbuild.linkers.MSVCDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
os.environ[evar] = ' '.join(quote_arg(w) for w in wrappercc)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
os.environ['AR'] = ' '.join(quote_arg(w) for w in wrapperlinker)
# Need a new env to re-run environment loading
env = get_fake_env(testdir, self.builddir, self.prefix)
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '134 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '133 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '110 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
# Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '58 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '91 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
@skip_if_not_base_option('b_lto_threads')
def test_lto_threads(self):
if is_cygwin():
raise unittest.SkipTest('LTO is broken on Cygwin.')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
extra_args: T.List[str] = []
if cc.get_id() == 'clang':
if is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
else:
extra_args.append('-Dc_args=-Werror=unused-command-line-argument')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_threads=8'] + extra_args)
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
for e in expected:
self.assertIn(e, s['parameters'])
@skip_if_not_base_option('b_lto_mode')
@skip_if_not_base_option('b_lto_threads')
def test_lto_mode(self):
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() != 'clang':
raise unittest.SkipTest('Only clang currently supports thinLTO')
if cc.linker.id not in {'ld.lld', 'ld.gold', 'ld64', 'lld-link'}:
raise unittest.SkipTest('thinLTO requires ld.lld, ld.gold, ld64, or lld-link')
elif is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_mode=thin', '-Db_lto_threads=8', '-Dc_args=-Werror=unused-command-line-argument'])
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8, mode='thin'))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
self.assertTrue(expected.issubset(set(s['parameters'])), f'Incorrect values for {t['name']}')
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init, _git_add_all)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}', version: '1.0')".format(name))
return path
def dist_impl(self, vcs_init, vcs_add_all=None, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
subproject('samerepo', required : false)
'''))
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
'''))
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
if vcs_add_all:
vcs_add_all(self.create_dummy_subproject(project_dir, 'samerepo'))
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
                # Verify that without --include-subprojects we have files from
                # the main project and also files from subprojects that are
                # part of the main vcs repository.
z = zipfile.ZipFile(zip_distfile)
expected = ['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']
if vcs_add_all:
expected += ['disttest-1.4.3/subprojects/',
'disttest-1.4.3/subprojects/samerepo/',
'disttest-1.4.3/subprojects/samerepo/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
                # Verify that with --include-subprojects we now also have files
                # from tarball and separate vcs subprojects, but not files from
                # unused subprojects.
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
expected += ['disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/vcssub/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
if vcs_add_all:
                # Verify we can separately distribute subprojects that live in
                # the same vcs repository as the main project.
subproject_dir = os.path.join(project_dir, 'subprojects', 'samerepo')
self.new_builddir()
self.init(subproject_dir)
self.build('dist')
xz_distfile = os.path.join(self.distdir, 'samerepo-1.0.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
tar = tarfile.open(xz_distfile, "r:xz")
self.assertEqual(sorted(['samerepo-1.0',
'samerepo-1.0/meson.build']),
sorted([i.name for i in tar]))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '40 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
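    # A minimal sketch of how a helper like get_rpath can read the rpath of
    # an ELF binary with readelf (assumption: the real get_rpath helper used
    # above is defined elsewhere in this file and may be implemented
    # differently):
    @staticmethod
    def _example_read_rpath(filename):
        out = subprocess.check_output(['readelf', '-d', filename],
                                      universal_newlines=True)
        for line in out.splitlines():
            # Dynamic section entries look like:
            #   0x... (RUNPATH)   Library runpath: [$ORIGIN/subdir]
            if '(RPATH)' in line or '(RUNPATH)' in line:
                return line.split('[', 1)[1].rstrip(']')
        return None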
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '151 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '151 reserved targets')
        # Copy so we do not mutate the global table
        targets = dict(mesonbuild.coredata.FORBIDDEN_TARGET_NAMES)
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
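    # For reference, pbcompile(cc, 'source.c', 'source.o') runs roughly
    # (the actual exelist and flags come from the compiler object):
    #   msvc syntax: cl /nologo /Fosource.o /c source.c
    #   otherwise:   cc -c source.c -o source.o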
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # Build the link command from the linker object; this yields the
        # msvc-style 'lib /OUT:...' or the ar-style 'ar csr ...' invocation
        # as appropriate, so no hand-rolled per-compiler command is needed.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build.
        Also test that it's not a hard error to have unsatisfiable library deps,
        since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
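    # For orientation, a pkg-config file of the shape this test relies on
    # might look like the following (hypothetical sketch; the real .pc
    # template lives in the '18 pkgconfig static' test directory):
    #   Name: foo
    #   Description: test library
    #   Version: 1.0
    #   Libs: -L${libdir} -lfoo -lm
    # With `static: true`, libfoo.a is preferred over the shared library,
    # and the statically unsatisfiable -lm must not be a hard error.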
@skipIfNoPkgconfig
@mock.patch.dict(os.environ)
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
        # pkg-config and pkgconf do not return the flags in the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
        self.assertTrue(found, "Option {!r} not found in introspect data.".format(name))
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '41 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
    # When running under Travis Mac CI, file updates can happen so fast
    # that timestamps do not get properly updated. Call this method before
    # file operations where that matters to make things work.
def mac_ci_delay(self):
if is_osx() and is_ci():
import time
time.sleep(1)
def test_options_with_choices_changing(self) -> None:
"""Detect when options like arrays or combos have their choices change."""
testdir = Path(os.path.join(self.unit_test_dir, '85 change option choices'))
options1 = str(testdir / 'meson_options.1.txt')
options2 = str(testdir / 'meson_options.2.txt')
# Test that old options are changed to the new defaults if they are not valid
real_options = str(testdir / 'meson_options.txt')
self.addCleanup(os.unlink, real_options)
shutil.copy(options1, real_options)
self.init(str(testdir))
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'b')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'array':
self.assertEqual(item['value'], ['b'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
self.wipe()
self.mac_ci_delay()
# When the old options are valid they should remain
shutil.copy(options1, real_options)
self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'c')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'array':
self.assertEqual(item['value'], ['b', 'c'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'],
cwd=workdir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
for l in ['cpp', 'cs', 'd', 'java', 'cuda', 'fortran', 'objc', 'objcpp', 'rust']:
try:
comp = env.detect_compiler_for(l, MachineChoice.HOST)
with tempfile.TemporaryDirectory() as d:
comp.sanity_check(d, env)
langs.append(l)
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in {'c', 'cpp', 'd'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
elif lang in {'java'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
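    # The template loop above corresponds to this manual workflow:
    #   meson init --language c --type executable
    #   meson setup --backend=ninja builddir
    #   ninja -C builddir
    # while 'meson init -b' infers the language from the existing source
    # file and builds in one step.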
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '173 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '182 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
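    # A rough sketch (assumption: POSIX-only; the real BuildDirLock in
    # mesonbuild also handles Windows locking) of the exclusion that makes
    # the double-locking check above raise MesonException:
    @staticmethod
    def _example_acquire_build_dir_lock(builddir):
        import fcntl  # imported locally so the sketch stays self-contained
        lockfilename = os.path.join(builddir, 'meson-private', 'meson.lock')
        lockfile = open(lockfilename, 'w')
        try:
            # A second process (or a second lock in the same process, as in
            # test_flock above) fails here because the file is already locked.
            fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (BlockingIOError, PermissionError):
            lockfile.close()
            raise MesonException('Some other Meson process is already using this build directory.')
        return lockfile  # the caller must keep this handle open to hold the lock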
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('WARNING: target links against shared modules. This is not '
'recommended as it is not supported on some platforms')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files which unix_args_to_native will not know as it expects libraries to use
# .lib as extension. For a DLL the import library is installed as .lib. Thus for msvc
            # this test needs to use shared libraries to test the path resolving logic in the
# dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
            # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
            self.builddir = initial_builddir
            self.installdir = initial_installdir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
        # Just to ensure that we caught the correct error; this check must run
        # outside the assertRaises block, otherwise it would never be reached.
        if isinstance(e.exception, subprocess.CalledProcessError):
            self.assertIn('as both', e.exception.output)
        else:
            self.assertIn('as both', str(e.exception))
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'static')
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.assertEqual(obj.options[OptionKey('set_sub_opt')].value, True)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
msg = "Option 'foo' must have a value separated by equals sign."
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['-Dfoo'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf('-Dfoo')
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages because we have no control over which ones will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('set_percent_opt')].value, 'myoption%')
self.wipe()
        # Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar',
'-Db_lundef=false', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'bar')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'release')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'thread')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'foo')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'plain')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'address')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError; it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # base options used to fail this test with Meson 0.46 and earlier versions.
pass
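    # A rough stand-in for mesonbuild's split_args, which the c_args
    # assertions above exercise (assumption: POSIX shlex rules; the real
    # implementation also handles Windows quoting):
    @staticmethod
    def _example_split_args(s):
        # _example_split_args('-Dfoo -Dbar "-Dthird=one two"')
        #   -> ['-Dfoo', '-Dbar', '-Dthird=one two']
        import shlex
        return shlex.split(s)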
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '208 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], '0')
self.setconf('-Doptimization=g')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], 'g')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '158 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # Note: ``with Path(...)`` would be a no-op here (a Path is not a
        # directory context manager), so pass the build directory as the
        # working directory instead.
        self.init(testdir, extra_args=['--wipe'], workdir=self.builddir)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
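    # As the assertions above pin down, the generated ID has the shape
    # <7-hex-digit-prefix>@@<target-name><type-suffix>. The prefix is
    # presumably a truncated digest of the subdir path (an assumption;
    # the test only guards the exact output, not the derivation).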
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '34 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '41 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '44 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
{
'descriptive_name': 'subsub',
'name': 'subsub',
'version': 'undefined'
},
{
'descriptive_name': 'subsubsub',
'name': 'subsubsub',
'version': 'undefined'
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '76 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '76 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
            self.run_target('clang-format')
            self.assertEqual(Path(testfile).read_text(),
                             Path(goodfile).read_text())
            self.assertEqual(Path(testheader).read_text(),
                             Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
dummydir = os.path.join(testdir, 'dummydir.h')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
self.assertNotIn(dummydir, out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
# XXX: These now generate in a different order, is that okay?
self.assertListEqual(sorted(res_nb, key=lambda x: x['name']), sorted(res_wb, key=lambda x: x['name']))
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_buildoptions_cross_only(self):
testdir = os.path.join(self.unit_test_dir, '84 cross only introspect')
testfile = os.path.join(testdir, 'meson.build')
res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
optnames = [o['name'] for o in res]
self.assertIn('c_args', optnames)
self.assertNotIn('build.c_args', optnames)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj, strict: bool = True):
for i in key_type_list:
                if isinstance(i[1], (list, tuple)) and None in i[1]:
                    i = (i[0], tuple([x for x in i[1] if x is not None]))
                    # Only optional keys (those whose type allows None) may
                    # legitimately be absent or None.
                    if i[0] not in obj or obj[i[0]] is None:
                        continue
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
if strict:
for k in obj.keys():
found = False
for i in key_type_list:
if k == i[0]:
found = True
break
self.assertTrue(found, 'Key "{}" not in expected list'.format(k))
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
('workdir', (str, None)),
('priority', int),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
('choices', (list, None)),
('value', (str, int, bool, list)),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('extra_files', list),
('subproject', (str, None)),
('install_filename', (list, None)),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i, strict=False)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in res1:
if i['name'] == 'cpp_std':
i['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
i['value'] = 'c++14'
if i['name'] == 'buildtype':
i['value'] = 'release'
if i['name'] == 'optimization':
i['value'] = '3'
if i['name'] == 'debug':
i['value'] = False
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
            node_counter[n] = node_counter.get(n, 0) + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
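# Expected AST schema: each node type maps to a list of (key, validator) or (key, None, expected_type) tuples that accept_node() checks recursively.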
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
accept_node(res_nb)
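# Sanity-check that the sample meson.build exercised these node types the expected number of times.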
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string : bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean : False
Another boolean: True
Some string : Hello World
A list : string
1
True
empty list :
enabled_opt : enabled
A number : 1
yes : YES
no : NO
coma list : a, b, c
Stuff
missing prog : NO
existing prog : ''' + sys.executable + '''
missing dep : NO
internal dep : YES
Plugins
long coma list : alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub : YES
sub2 : NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. that the target selection actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
# Target specified in a project with non-unique names
testdir = os.path.join(self.common_test_dir, '186 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
# Regression test: a spurious reconfigure was happening when the build
# directory was inside the source directory.
# See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
# During the first configure the file did not exist, so no dependency should
# have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
# During the reconfigure the file did exist, but is inside build
# directory, so no dependency should have been set. A rebuild should not
# trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '207 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '42 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '226 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = list(section_pattern.finditer(md))
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
md_commands = set(md_command_sections.keys())
help_output = self._run(self.meson_command + ['--help'])
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'{{ ' + command + r'_usage.inc }}[\r\n]'
r'.*?'
r'{{ ' + command + r'_arguments.inc }}[\r\n]',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def _check_coverage_files(self, types=('text', 'xml', 'html')):
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f))
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '106 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
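# Values in [constants] sections are merged across all machine files given on the command line, so crossfile2 can reference `compiler` from crossfile1; the / operator joins paths.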
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '82 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '229 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
'Requires ASM compiler for x86 or x86_64 platform currently only available on Linux CI runners')
def test_nostdlib(self):
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
def test_meson_version_compare(self):
testdir = os.path.join(self.unit_test_dir, '83 meson version compare')
out = self.init(testdir)
self.assertNotRegex(out, r'WARNING')
def test_wrap_redirect(self):
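# A wrap-redirect file must point at another .wrap file of the form <dir>/subprojects/<name>.wrap; the cases below exercise the validation errors and then the happy path.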
redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
os.makedirs(os.path.dirname(real_wrap))
# Invalid redirect, filename must have .wrap extension
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrapper
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename cannot be in parent directory
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = ../real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename must be in foo/subprojects/real.wrap
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
PackageDefinition(redirect_wrap)
# Correct redirect
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrap
'''))
with open(real_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = http://invalid
'''))
wrap = PackageDefinition(redirect_wrap)
self.assertEqual(wrap.get('url'), 'http://invalid')
@skip_if_no_cmake
def test_nested_cmake_rebuild(self) -> None:
# This checks a bug where if a non-meson project is used as a third
# level (or deeper) subproject it doesn't cause a rebuild if the build
# files for that project are changed
testdir = os.path.join(self.unit_test_dir, '86 nested subproject regenerate depends')
cmakefile = Path(testdir) / 'subprojects' / 'sub2' / 'CMakeLists.txt'
self.init(testdir)
self.build()
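# Append-open the CMake file and bump its mtime so the generated build rules see the third-level subproject's build file as changed.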
with cmakefile.open('a') as f:
os.utime(str(cmakefile))
self.assertReconfiguredBuildIsNoop()
def test_version_file(self):
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir)
projinfo = self.introspect('--projectinfo')
self.assertEqual(projinfo['version'], '1.0.0')
def test_cflags_cppflags(self):
envs = {'CPPFLAGS': '-DCPPFLAG',
'CFLAGS': '-DCFLAG',
'CXXFLAGS': '-DCXXFLAG'}
srcdir = os.path.join(self.unit_test_dir, '90 multiple envvars')
self.init(srcdir, override_envvars=envs)
self.build()
def test_build_b_options(self) -> None:
# Currently (0.57) these do nothing, but they've always been allowed
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir, extra_args=['-Dbuild.b_lto=true'])
def test_install_skip_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '91 install skip subprojects')
self.init(testdir)
self.build()
main_expected = [
'',
'share',
'include',
'foo',
'bin',
'share/foo',
'share/foo/foo.dat',
'include/foo.h',
'foo/foofile',
'bin/foo' + exe_suffix,
]
bar_expected = [
'bar',
'share/foo/bar.dat',
'include/bar.h',
'bin/bar' + exe_suffix,
'bar/barfile'
]
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() == 'msvc':
main_expected.append('bin/foo.pdb')
bar_expected.append('bin/bar.pdb')
prefix = destdir_join(self.installdir, self.prefix)
main_expected = [Path(prefix, p) for p in main_expected]
bar_expected = [Path(prefix, p) for p in bar_expected]
all_expected = main_expected + bar_expected
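# Run `meson install` with the given extra args, compare the installed tree against `expected`, then wipe the installdir so each check starts from a clean slate.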
def check_installed_files(extra_args, expected):
args = ['install', '--destdir', self.installdir] + extra_args
self._run(self.meson_command + args, workdir=self.builddir)
all_files = list(Path(self.installdir).rglob('*'))
self.assertEqual(sorted(expected), sorted(all_files))
windows_proof_rmtree(self.installdir)
check_installed_files([], all_expected)
check_installed_files(['--skip-subprojects'], main_expected)
check_installed_files(['--skip-subprojects', 'bar'], main_expected)
check_installed_files(['--skip-subprojects', 'another'], all_expected)
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# First look up pkg-config and cache the (missing) result, then
# use the cached result without erroring out (required : false), and
# finally use the cached result for a required dependency to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(boost_root.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is output when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. A wrap file from a subproject is used but fails because it does not
contain required keys.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
def test_error_func(self):
self.assertMesonRaises("error('a', 'b', ['c', ['d', {'e': 'f'}]], 'g')",
"Problem encountered: a b \['c', \['d', {'e' : 'f'}\]\] g")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd.exe with args without searching
prog = ExternalProgram('cmd', command=['cmd', '/C'])
self.assertTrue(prog.found(), msg='cmd not found with args')
self.assertPathEqual(prog.get_command()[0], 'cmd')
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# If `.PY` is in PATHEXT, scripts can be found as programs
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
# Finding a script in PATH w/o extension works and adds the interpreter
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script with an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script without an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# Force people to update this test when the ignore list changes, and also test the current contents
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertIn('prog.pdb', files)
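# Set the <lang>_ld environment variable (and its deprecated alias, if any) to `name` and verify that compiler detection picks the expected linker id.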
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link-compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link-compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link-compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = getattr(env, 'detect_d_compiler')(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
'''
Test that the correct VS CRT is used when running compiler checks
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
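# Every compiler sanity check recorded in the meson log must carry the expected CRT argument.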
def sanitycheck_vscrt(vscrt):
checks = self.get_meson_log_sanitychecks()
self.assertTrue(len(checks) > 0)
for check in checks:
self.assertIn(vscrt, check)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=release'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=md'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mt'])
sanitycheck_vscrt('/MT')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mtd'])
sanitycheck_vscrt('/MTd')
def test_modules(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('C++ modules only work with the Ninja backend (not {}).'.format(self.backend.name))
if 'VSCMD_VER' not in os.environ:
raise unittest.SkipTest('C++ modules is only supported with Visual Studio.')
if version_compare(os.environ['VSCMD_VER'], '<16.9.0'):
raise unittest.SkipTest('C++ modules are only supported with VS 2019 Preview or newer.')
self.init(os.path.join(self.unit_test_dir, '87 cpp modules'))
self.build()
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '149 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson in order to ascertain
# that it does not call install_name_tool with duplicate -delete_rpath
# arguments, which would cause installation to fail
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '105 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
self.assertEqual(libhello_nolib.get_pkgconfig_variable('foo', {}), 'bar')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags, so we have to parse the
# generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
# See common/45 pkgconfig-gen/meson.build for a description of the case this test covers
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lsimple1', content)
self.assertIn('Libs.private: -lz', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
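'''
Test that the pkg-config files generated into meson-uninstalled allow
a dependent project to build against this project's build directory.
'''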
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
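# subprocess.call returns the exit status, so 0 means the module exists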
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def test_qt6dependency_qmake_detection(self):
'''
Test that qt6 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt 6
if not shutil.which('qmake-qt6'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 6' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 6.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt6
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt6 \(modules: Core\) found: YES .* \((qmake|qmake-qt6)\)\n')
def glob_sofiles_without_privdir(self, g):
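'''Like glob(), but exclude Meson's private *.p directories.'''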
files = glob(g)
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
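'''Check the soname, symlink aliases, and file counts for each library versioning combination in the '1 soname' test.'''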
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '37 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '37 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir: str, compiler: 'Compiler') -> None:
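# Each has_* flag below is True when the corresponding -std= values are
# expected to work: compilers other than clang/gcc are assumed to accept
# every std they list; for clang/gcc we gate on the compiler version
# (_clang_at_least's second argument covers Apple clang's own numbering).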
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_cpp20 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=10.0.0', None) or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=10.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
key = OptionKey('std', lang=compiler.language)
for v in compiler.get_options()[key].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
elif '++20' in v and not has_cpp20:
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
self.init(testdir, extra_args=[f'-D{key!s}={v}'])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and (compiler.get_id(), v) not in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print(f'{key!s} was {v!r}')
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if compiler.language == 'c':
env_flag_name = 'CFLAGS'
elif compiler.language == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(compiler.language))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars=env)
# ICC won't fail in the above because additional flags are needed to
# turn unknown -std=... options into errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc)
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp)
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir, extra_args='--unity=subprojects')
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
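# found_mode[1:] drops the leading file-type character of filemode()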
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '191 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skipIfNoPkgconfig
def test_build_rpath_pkgconfig(self):
'''
Test that current build artefacts (libs) are found first on the rpath,
the manually specified rpath comes second, and additional rpath elements
(from pkg-config files) come last.
'''
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '90 pkgconfig build rpath order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz:/foo/dummy')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz:/foo/dummy')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '81 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
continue
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
tool_path = os.path.join(testdir, 'some_cross_tool.py')
crossfile.write(textwrap.dedent(f'''\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
sometool.py = ['{tool_path}']
someothertool.py = '{tool_path}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
'''))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertNotIn(b'libpkgdep-int', pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertNotIn(b'glib', pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({OptionKey('pkg_config_path'): pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Test that a program can be built against an installed static library
found through its generated pkg-config file.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On these platforms (Windows, Cygwin, macOS, OpenBSD) libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.src_root, 'test cases', 'native', '9 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc., you need to set extra arguments such
as LD_LIBRARY_PATH, so the post-install checks are skipped there.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('60 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
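# Track the highest number of occurrences of the flag on any single
# line; with correct deduplication it should never appear more than once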
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library occurs exactly once on this link line
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# The c_std defined in the project options must also be in effect when compiling natively.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py'))))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that installed libraries works
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
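'''Set the per-language linker environment variable to `name` and verify that compiler detection picks the linker with the expected id.'''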
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if isinstance(comp, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler,
mesonbuild.compilers.AppleClangObjCCompiler,
mesonbuild.compilers.AppleClangObjCPPCompiler)):
raise unittest.SkipTest('AppleClang is currently only supported with ld64')
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
@skipIfNoExecutable('ld.gold') # need an additional check here because _check_ld checks for gcc
def test_ld_environment_variable_rust(self):
self._check_ld('gcc', 'gcc -fuse-ld=gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
# Write a test project where the first dependency() returns not-found
# because the 'broken' subproject does not exist, but that should not
# prevent the second dependency() from being looked up on the system.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
testdir = os.path.join(self.unit_test_dir, '78 as link whole')
self.init(testdir)
with open(os.path.join(self.privatedir, 'bar1.pc')) as f:
content = f.read()
self.assertIn('-lfoo', content)
with open(os.path.join(self.privatedir, 'bar2.pc')) as f:
content = f.read()
self.assertNotIn('-lfoo', content)
def test_prelinking(self):
# Prelinking currently only works on recent GNU toolchains.
# Skip everything else. When support for other toolchains is added,
# remove limitations as necessary.
if is_osx():
raise unittest.SkipTest('Prelinking not supported on Darwin.')
if 'clang' in os.environ.get('CC', 'dummy'):
raise unittest.SkipTest('Prelinking not supported with Clang.')
gccver = subprocess.check_output(['cc', '--version'])
if b'7.5.0' in gccver:
raise unittest.SkipTest('GCC on Bionic is too old to be supported.')
testdir = os.path.join(self.unit_test_dir, '88 prelinking')
self.init(testdir)
self.build()
outlib = os.path.join(self.builddir, 'libprelinked.a')
ar = shutil.which('ar')
self.assertTrue(os.path.exists(outlib))
self.assertIsNotNone(ar)
p = subprocess.run([ar, 't', outlib],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True, timeout=1)
obj_files = p.stdout.strip().split('\n')
self.assertEqual(len(obj_files), 1)
self.assertTrue(obj_files[0].endswith('-prelink.o'))
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '77 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
self.assertTrue(False, 'Libdir specified on command line gets reset.')
def test_std_remains(self):
# The c_std defined in the project options must also be in effect when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def test_run_native_test(self):
'''
https://github.com/mesonbuild/meson/issues/7997
check run native test in crossbuild without exe wrapper
'''
testdir = os.path.join(self.unit_test_dir, '89 run native test')
stamp_file = os.path.join(self.builddir, 'native_test_has_run.stamp')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(stamp_file)
self.run_tests()
self.assertPathExists(stamp_file)
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
error_message = "An exe_wrapper is needed but was not found. Please define one in cross file and check the command and/or add it to PATH."
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
self.assertEqual(str(cm.exception),
"The exe_wrapper defined in the cross file 'broken' was not found. Please check the command and/or add it to PATH.")
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine. If it is
# not, or the python headers can't be found, the test will raise
# MESON_SKIP_TEST. We could check beforehand what version of python is
# available, but that is the module's job, so it's a bit of a
# chicken-and-egg situation; we just ask for forgiveness rather than
# permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above: pypy and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
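# Illustrative sketch: via rewrite() below, the assembled command looks
# roughly like
#   <rewrite_command> --verbose --skip --sourcedir <dir> command <args.json>
# Human-readable progress goes to stdout, while the machine-readable result
# arrives on stderr as JSON, e.g.
#   {"target": {"trivialprog0@exe": {"name": "...", "sources": ["..."]}}}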
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
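# The key format appears to be '<subdir-id>@@<name>@<type>', where the
# leading component ('94b671c' here) is an identifier the rewriter derives
# from the target's subdirectory; we only rely on the full key matching.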
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD', 'Boost']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': 'debug=true'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
if isinstance(v, (bool, int, float)):
f.write("{}={}\n".format(k, v))
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v])))
else:
f.write("{}='{}'\n".format(k, v))
return filename
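# Illustrative sketch of the serialization above:
#   helper_create_native_file({'binaries': {'bash': '/usr/bin/bash'},
#                              'built-in options': {'werror': True}})
# writes:
#   [binaries]
#   bash='/usr/bin/bash'
#   [built-in options]
#   werror=True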
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On Windows we need yet another level of indirection, as cmd cannot
# invoke Python files itself, so instead we generate a .bat file, which
# invokes our Python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
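# Illustrative sketch (assuming version='12345'): the generated wrapper is
# a small python script that handles the overridden flag itself,
#   parser.add_argument("-version", "--version", action="store_true")
#   if args.version:
#       print("12345", file=sys.stdout)
#       sys.exit(0)
# and otherwise runs the real binary with the remaining arguments,
# forwarding its output and exit code. On Windows a trampoline .bat
# invoking the script is returned instead.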
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
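# The cb callback receives the detected compiler and returns a tuple of
# (binary to inject into the machine file's binaries table, compiler id
# expected after re-detection); see test_c_compiler below for a concrete
# example.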
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
# We may not have python2, so check for it first
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for Windows (Windows build, Linux host),
# but we don't support that ATM, so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other Fortran compilers Meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang: str, binary: str, version_str: str, version: str) -> None:
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = [wrapper]
compiler = getter()
self.assertEqual(compiler.version, version)
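# Illustrative flow for the tests below: wrap e.g. 'valac' so that it
# answers '--version' with 'Vala 1.2345', point the fake environment's
# binary table at the wrapper, re-run detection, and check that the
# detected compiler reports version '1.2345'.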
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = [wrapper]
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '41 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0),
('CaseSenSiTivE', 'SOME other Value'),
('CASESENSITIVE', 'some other Value')]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '80 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '41 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
# Integers are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('Did not find cpp_std in build options?')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/foo'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '99 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'c_args' in each['name']:
# This path will be hit twice, once for build and once for host.
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
self.assertEqual(found, 4, 'Did not find all four expected options')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises((RuntimeError, subprocess.CalledProcessError)) as cm:
self.init(testcase, extra_args=['--native-file', config])
if isinstance(cm.exception, RuntimeError):
check = str(cm.exception)
else:
check = cm.exception.stdout
self.assertIn('Parent should override default_library', check)
def test_builtin_options_subprojects_dont_inherit_parent_override(self):
# Subprojects that set their own default_library via subproject(...) must
# not inherit the parent's native-file override
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
self.init(testcase, extra_args=['--native-file', config])
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' setting is present, but doesn't have the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
# the paths section can have bindir, and that needs to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
This is mainly aimed at testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
return textwrap.dedent(f"""\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
exe_wrapper = {str(exe_wrapper) if exe_wrapper is not None else '[]'}
[properties]
needs_exe_wrapper = {needs_exe_wrapper}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""")
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
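# Illustrative sketch: helper_create_cross_file({'binaries': {'pkgconfig': 'pkg-config'}})
# writes:
#   [binaries]
#   pkgconfig='pkg-config'
# (unlike the native-file helper above, every value here is quoted as a string).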
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
# This is just a smoke test for the cross file, since the implementation
# shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
Such options could be passed twice, once for the build machine, and
once for the host machine. I've picked pkg-config path, but any option
that can be set for both would do.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser()
return iter(parser.parse(io.StringIO(s)))
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
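# Illustrative sketch of the event mapping these helpers assert on: a
# stream like
#   TAP version 13
#   1..2
#   ok 1 first
#   not ok 2 second # TODO flaky
# should yield Version(version=13), Plan(num_tests=2, late=False), an OK
# Test event, and an EXPECTEDFAIL Test event (per test_one_test_todo below).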
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, num_tests=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, num_tests=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
class SubprojectsCommandTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.root_dir = Path(self.builddir)
self.project_dir = self.root_dir / 'src'
self._create_project(self.project_dir)
self.subprojects_dir = self.project_dir / 'subprojects'
os.makedirs(str(self.subprojects_dir))
def _create_project(self, path, project_name='dummy'):
os.makedirs(str(path), exist_ok=True)
with open(str(path / 'meson.build'), 'w') as f:
f.write("project('{}')".format(project_name))
def _git(self, cmd, workdir):
return git(cmd, str(workdir), check=True)[1].strip()
def _git_config(self, workdir):
self._git(['config', 'user.name', 'Meson Test'], workdir)
self._git(['config', 'user.email', 'meson.test@example.com'], workdir)
def _git_remote(self, cmd, name):
return self._git(cmd, self.root_dir / name)
def _git_local(self, cmd, name):
return self._git(cmd, self.subprojects_dir / name)
def _git_local_branch(self, name):
# Same as `git branch --show-current` but compatible with older git versions
branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
return branch if branch != 'HEAD' else ''
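# Illustrative behavior: returns e.g. 'master' when a branch is checked out,
# and '' when the subproject is in detached HEAD state.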
def _git_local_commit(self, name, ref='HEAD'):
return self._git_local(['rev-parse', ref], name)
def _git_remote_commit(self, name, ref='HEAD'):
return self._git_remote(['rev-parse', ref], name)
def _git_create_repo(self, path):
# If a user has the git configuration init.defaultBranch set, we want to override it
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
self._create_project(path)
self._git(['init'] + extra_cmd, path)
self._git_config(path)
self._git(['add', '.'], path)
self._git(['commit', '-m', 'Initial commit'], path)
def _git_create_remote_repo(self, name):
self._git_create_repo(self.root_dir / name)
def _git_create_local_repo(self, name):
self._git_create_repo(self.subprojects_dir / name)
def _git_create_remote_commit(self, name, branch):
self._git_remote(['checkout', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_branch(self, name, branch):
self._git_remote(['checkout', '-b', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_tag(self, name, tag):
self._git_remote(['commit', '--allow-empty', '-m', 'tag {} commit'.format(tag)], name)
self._git_remote(['tag', tag], name)
def _wrap_create_git(self, name, revision='master'):
path = self.root_dir / name
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-git]
url={}
revision={}
'''.format(os.path.abspath(str(path)), revision)))
def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
path = self.root_dir / tarball
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-file]
source_url={}
'''.format(os.path.abspath(str(path)))))
def _subprojects_cmd(self, args):
return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))
def test_git_update(self):
subp_name = 'sub1'
# Create a fake remote git repository and a wrap file. Check that
# "meson subprojects download" works.
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
self._subprojects_cmd(['download'])
self.assertPathExists(str(self.subprojects_dir / subp_name))
self._git_config(self.subprojects_dir / subp_name)
# Create a new remote branch and update the wrap file. Check that
# "meson subprojects update --reset" checks out the new branch.
self._git_create_remote_branch(subp_name, 'newbranch')
self._wrap_create_git(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update the remote newbranch. Check that the new commit is pulled into the
# existing local newbranch, and that no spurious 'git stash' message is printed.
self._git_create_remote_commit(subp_name, 'newbranch')
out = self._subprojects_cmd(['update', '--reset'])
self.assertNotIn('No local changes to save', out)
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update the remote newbranch and switch to another branch locally. Check that
# the update switches the current branch back to newbranch and pulls the latest commit.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Stage some local changes, then update. Check that the local changes get
# stashed.
self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
self._git_local(['add', '.'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
self.assertTrue(self._git_local(['stash', 'list'], subp_name))
# Create a new remote tag and update the wrap file. Check that
# "meson subprojects update --reset" checks out the new tag in detached HEAD mode.
self._git_create_remote_tag(subp_name, 'newtag')
self._wrap_create_git(subp_name, 'newtag')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))
# Create a new remote commit and update the wrap file with the commit id.
# Check that "meson subprojects update --reset" checks out the new commit
# in detached HEAD mode.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
self._wrap_create_git(subp_name, new_commit)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), new_commit)
# Create a local project that is not in a git repository, then update it with
# a git wrap. Without --reset it should print an error message and return
# failure. With --reset it should delete the existing project and clone the
# new one.
subp_name = 'sub2'
self._create_project(self.subprojects_dir / subp_name)
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self._subprojects_cmd(['update'])
self.assertIn('Not a git repository', cm.exception.output)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name))
@skipIfNoExecutable('true')
def test_foreach(self):
self._create_project(self.subprojects_dir / 'sub_file')
self._wrap_create_file('sub_file')
self._git_create_local_repo('sub_git')
self._wrap_create_git('sub_git')
self._git_create_local_repo('sub_git_no_wrap')
def ran_in(s):
ret = []
prefix = 'Executing command in '
for l in s.splitlines():
if l.startswith(prefix):
ret.append(l[len(prefix):])
return sorted(ret)
dummy_cmd = ['true']
out = self._subprojects_cmd(['foreach'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_file'])
out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_git'])
def _clang_at_least(compiler: 'Compiler', minver: str, apple_minver: T.Optional[str]) -> bool:
"""
Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str, optional
AppleClang minimum version; if None, the check always fails for AppleClang
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
if apple_minver is None:
return False
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
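# Illustrative use (hypothetical version strings): on AppleClang,
# _clang_at_least(cc, '>=10.0', None) is always False, while
# _clang_at_least(cc, '>=10.0', '>=11.0') compares against the AppleClang version.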
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.CFLAGS_MAPPING.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
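# Illustrative conversion (hypothetical invocation, not executed here):
# convert_args(['-f', 'InternalTests.test_listify']) returns
# ['--exitfirst', '-k', 'InternalTests and test_listify'].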
def running_single_tests(argv, cases):
'''
Check whether we only got arguments for running individual tests, not
entire testcases, and not all testcases (no test args).
'''
got_test_arg = False
for arg in argv:
if arg.startswith('-'):
continue
for case in cases:
if not arg.startswith(case):
continue
if '.' not in arg:
# Got a testcase, done
return False
got_test_arg = True
return got_test_arg
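# For example, given the cases listed in main() below,
# running_single_tests(['InternalTests.test_listify'], cases) is True, whereas
# a bare testcase name (['InternalTests']) or no test args at all yield False.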
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests', 'SubprojectsCommandTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = []
# Don't use pytest-xdist when running single unit tests, since in that
# case it wastes time spawning many processes just to distribute the tests.
if not running_single_tests(sys.argv, cases):
pytest_args += ['-n', 'auto']
pytest_args += ['./run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# Fallback to plain unittest.
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
start = time.monotonic()
try:
raise SystemExit(main())
finally:
print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import zipfile
import tarfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.interpreterbase import typed_pos_args, InvalidArguments
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux, git
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException, OptionKey
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.scripts import destdir_join
from mesonbuild.mtest import TAPParser, TestResult
from mesonbuild.wrap.wrap import PackageDefinition, WrapException
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
if T.TYPE_CHECKING:
from mesonbuild.compilers import Compiler
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
def get_dynamic_section_entry(fname: str, entry: str) -> T.Optional[str]:
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return str(m.group(1))
return None # The file did not contain the specified entry.
def get_soname(fname: str) -> T.Optional[str]:
return get_dynamic_section_entry(fname, 'soname')
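# Illustrative readelf -d output line that get_dynamic_section_entry() matches:
#   0x000e (SONAME)  Library soname: [libfoo.so.1]
# for which get_soname() would return 'libfoo.so.1'.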
def get_rpath(fname: str) -> T.Optional[str]:
raw = get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
# Get both '' and None here
if not raw:
return None
# nix/nixos adds a bunch of stuff to the rpath out of necessity that we
# don't check for, so clear those
final = ':'.join([e for e in raw.split(':') if not e.startswith('/nix')])
return final
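# For instance, a hypothetical raw value '/nix/store/abc/lib:/usr/local/lib'
# would be reduced to '/usr/local/lib' by the filter above.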
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def _git_init(project_dir):
# If a user has the git configuration init.defaultBranch set, we want to override it
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
subprocess.check_call(['git', 'init'] + extra_cmd, cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
_git_add_all(project_dir)
def _git_add_all(project_dir):
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
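# Illustrative use as a decorator (hypothetical executable name):
# @skipIfNoExecutable('valgrind')
# def test_something(self): ...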
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, e.g., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, e.g., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular environment variable is set, except when running under CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
key = OptionKey(feature)
if key not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
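# Illustrative use, mirroring the ICC example in the docstring above:
# @skip_if_not_base_option('b_sanitize')
# def test_sanitizer_build(self): ...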
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
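# Illustrative use:
#   with temp_filename() as fname:
#       with open(fname, 'w') as f:
#           f.write('data')
# The file is removed once the block exits.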
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
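# Illustrative effect: inside the block, lookups behave as if pkg-config
# were not installed, e.g.
#   with no_pkgconfig():
#       assert shutil.which('pkg-config') is None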
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
a = cc.compiler_args(['-I.'])
# First, check that the tree construction deduplicates the correct -I argument.
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
# Think of this as an assertion; we cannot actually apply it, otherwise the
# CompilerArgs would already flush the pending changes:
# assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
# Then check that, when the CompilerArgs already have a built container list,
# the deduplication picks the correct one.
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
# check include order is kept when deduplicating
a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
a += ['-Ifirst']
self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class_clike(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Cannot be used as a context manager because we need to
# open it a second time, and that is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.options[OptionKey('link_args', lang='c')] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[-1] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[-1] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[-1] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(sorted(deps), sorted(expdeps))
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
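        # In all of the cases above the result follows the order of the
        # pkgconfig dirs: the user's local libdir sorts first and
        # '/usr/lib' sorts last, regardless of the input order.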
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
env.scratch_dir = tmpdir
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
        errors = [] # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
def test_typed_pos_args_types(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], int)
self.assertIsInstance(args[2], bool)
_(None, mock.Mock(), ['string', 1, False], None)
def test_typed_pos_args_types_invalid(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1.0, False], None)
self.assertEqual(str(cm.exception), 'foo argument 2 was of type "float" but should have been "int"')
def test_typed_pos_args_types_wrong_number(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 2.')
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1, True, True], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 4.')
def test_typed_pos_args_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_varargs_not_given(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertEqual(args[1], [])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_varargs_invalid(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been "str"')
    def test_typed_pos_args_varargs_invalid_multiple_types(self) -> None:
@typed_pos_args('foo', str, varargs=(str, list))
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been one of: "str", "list"')
def test_typed_pos_args_max_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=5)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=1)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args'], None)
self.assertEqual(str(cm.exception), 'foo takes between 1 and 2 arguments, but got 3.')
def test_typed_pos_args_min_varargs(self) -> None:
@typed_pos_args('foo', varargs=str, max_varargs=2, min_varargs=1)
        def _(obj, node, args: T.Tuple[T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], list)
self.assertIsInstance(args[0][0], str)
self.assertIsInstance(args[0][1], str)
_(None, mock.Mock(), ['string', 'var'], None)
def test_typed_pos_args_min_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_and_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 'bar'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 4.')
def test_typed_pos_args_min_and_max_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 1.')
def test_typed_pos_args_variadic_and_optional(self) -> None:
@typed_pos_args('foo', str, optargs=[str], varargs=str, min_varargs=0)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(AssertionError) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(
str(cm.exception),
'varargs and optargs not supported together as this would be ambiguous')
def test_typed_pos_args_min_optargs_not_met(self) -> None:
@typed_pos_args('foo', str, str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_optargs_max_exceeded(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', '1', '2'], None)
self.assertEqual(str(cm.exception), 'foo takes at most 2 arguments, but got 3.')
def test_typed_pos_args_optargs_not_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsNone(args[1])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_optargs_some_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str, int])
def _(obj, node, args: T.Tuple[str, T.Optional[str], T.Optional[int]], kwargs) -> None:
self.assertEqual(len(args), 3)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
self.assertEqual(args[1], '1')
self.assertIsNone(args[2])
_(None, mock.Mock(), ['string', '1'], None)
def test_typed_pos_args_optargs_all_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
_(None, mock.Mock(), ['string', '1'], None)
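    def test_typed_pos_args_fixed_and_varargs_sketch(self) -> None:
        # Illustrative sketch: fixed positional arguments and varargs
        # compose left to right, with the varargs collected into a
        # trailing list (combining the separate cases tested above).
        @typed_pos_args('foo', str, int, varargs=str)
        def _(obj, node, args: T.Tuple[str, int, T.List[str]], kwargs) -> None:
            self.assertEqual(args, ('name', 1, ['a', 'b']))
        _(None, mock.Mock(), ['name', 1, 'a', 'b'], None)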
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options():
self.assertIn(str(opt), md)
for opt in comp.base_options:
self.assertIn(str(opt), md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
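        # Walk the iterator of heading matches until `name` is found, then
        # return the markdown between that heading and the next one (or the
        # end of the document if it is the last heading).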
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
        # Extract the content of the 'Universal options' section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS],
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE],
]))
        # Check that the `buildtype` table inside `Core options` matches
        # how setting the builtin options actually behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_option(OptionKey('buildtype'), buildtype)
self.assertEqual(env.coredata.options[OptionKey('buildtype')].value, buildtype)
self.assertEqual(env.coredata.options[OptionKey('optimization')].value, opt)
self.assertEqual(env.coredata.options[OptionKey('debug')].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
def test_all_functions_defined_in_ast_interpreter(self):
'''
        Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
        data_files = [] # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
            # Xcode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
        # In case the directory is inside a symlinked directory, find the
        # real path; otherwise we might not find the srcdir from inside
        # the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
        # If this call hangs, CI will just abort. It is very hard to
        # distinguish between a CI issue and a test bug in that case.
        # Set a timeout and fail loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
encoding='utf-8',
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
with mock.patch.dict(os.environ, override_envvars):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
        # If Ninja is using .rsp files, generate them, read their contents,
        # and replace each entry's command in the parsed json with the full
        # command line reconstructed from its rsp file.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
        Fetch a list of command lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
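            # split('\n') leaves a trailing empty string after the final
            # newline, so the last printed line is at index -2.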
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
self.init(testdir, default_args=False, inprocess=True)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
break
else:
            self.fail('Did not find option "prefix"')
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
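        # The three formats differ only in the define keyword and variable
        # delimiters ('#mesondefine VAR', '#cmakedefine VAR ${VAR}',
        # '#cmakedefine VAR @VAR@'); all must render identically.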
confdata = ConfigurationData()
        # VAR is not in confdata, so a commented-out undef is produced
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Handles meson format exceptions
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# Dict value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '164 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir)
self.run_target('check_exists')
self.run_target('check-env')
self.run_target('check-env-ct')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
'new_directory': 'share/new_directory',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '141 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
def read_logs():
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
return list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
logged = read_logs()
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
# Verify that with --dry-run we obtain the same logs but with nothing
# actually installed
windows_proof_rmtree(self.installdir)
self._run(self.meson_command + ['install', '--dry-run', '--destdir', self.installdir], workdir=self.builddir)
self.assertEqual(logged, read_logs())
self.assertFalse(os.path.exists(self.installdir))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_nopromote(self):
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--wrap-mode=nopromote'])
self.assertIn('Dependency "subsub" not found', cm.exception.stdout)
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
basic_log = f.read()
        # Run the buggy test with a setup whose env makes it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt'), encoding='utf-8') as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
        # Run the buggy test with a setup that has no env; it will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
# Setup that does not define a wrapper works with --wrapper
self._run(self.mtest_command + ['--setup=timeout', '--wrapper', shutil.which('valgrind')])
# Setup that skips test works
self._run(self.mtest_command + ['--setup=good'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
exclude_suites_log = f.read()
self.assertFalse('buggy' in exclude_suites_log)
        # --suite overrides add_test_setup(exclude_suites)
self._run(self.mtest_command + ['--setup=good', '--suite', 'buggy'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
include_suites_log = f.read()
self.assertTrue('buggy' in include_suites_log)
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
        # Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
default_log = f.read()
        # Run tests explicitly selecting the setup that is marked as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt'), encoding='utf-8') as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt'), encoding='utf-8') as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '130 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '131 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
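                # e.g. 'gcc', 'arm-linux-gnueabihf-gcc', or 'g++' match the GNU
                # branch; 'clang-cl' must be checked before plain 'clang'.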
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
                    # This is clang, not clang-cl. This can be either an
                    # ld-like linker or a link.exe-like linker (usually the
                    # former for msys2, the latter otherwise)
self.assertIsInstance(cc.linker, (mesonbuild.linkers.MSVCDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
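            # The resulting value looks something like:
            #     CC="python3 'compiler wrapper.py' cc -DSOME_ARG"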
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
os.environ[evar] = ' '.join(quote_arg(w) for w in wrappercc)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
os.environ['AR'] = ' '.join(quote_arg(w) for w in wrapperlinker)
# Need a new env to re-run environment loading
env = get_fake_env(testdir, self.builddir, self.prefix)
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '134 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
                    raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
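        # The ninja rule name encodes the linking language, so a line like
        # 'build c-asm...: c_LINKER ...' shows the C linker was used.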
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '133 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '110 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '58 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '91 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
@skip_if_not_base_option('b_lto_threads')
def test_lto_threads(self):
if is_cygwin():
raise unittest.SkipTest('LTO is broken on Cygwin.')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
extra_args: T.List[str] = []
if cc.get_id() == 'clang':
if is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
else:
extra_args.append('-D_cargs=-Werror=unused-command-line-argument')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_threads=8'] + extra_args)
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
for e in expected:
self.assertIn(e, s['parameters'])
@skip_if_not_base_option('b_lto_mode')
@skip_if_not_base_option('b_lto_threads')
def test_lto_mode(self):
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() != 'clang':
raise unittest.SkipTest('Only clang currently supports thinLTO')
if cc.linker.id not in {'ld.lld', 'ld.gold', 'ld64', 'lld-link'}:
raise unittest.SkipTest('thinLTO requires ld.lld, ld.gold, ld64, or lld-link')
elif is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_mode=thin', '-Db_lto_threads=8', '-Dc_args=-Werror=unused-command-line-argument'])
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8, mode='thin'))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
self.assertTrue(expected.issubset(set(s['parameters'])), f'Incorrect values for {t["name"]}')
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init, _git_add_all)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}', version: '1.0')".format(name))
return path
def dist_impl(self, vcs_init, vcs_add_all=None, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
subproject('samerepo', required : false)
'''))
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
'''))
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
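            # Subproject flavours: 'vcssub' gets its own repository,
            # 'tarballsub' has no vcs at all, 'samerepo' lives in the main
            # repository, and 'unusedsub' is never referenced by the build.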
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
if vcs_add_all:
vcs_add_all(self.create_dummy_subproject(project_dir, 'samerepo'))
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
# Verify that without --include-subprojects we have files from
# the main project and also files from subprojects part of the
# main vcs repository.
z = zipfile.ZipFile(zip_distfile)
expected = ['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']
if vcs_add_all:
expected += ['disttest-1.4.3/subprojects/',
'disttest-1.4.3/subprojects/samerepo/',
'disttest-1.4.3/subprojects/samerepo/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
# Verify that with --include-subprojects we now also have files
# from tarball and separate vcs subprojects. But not files from
# unused subprojects.
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
expected += ['disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/vcssub/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
if vcs_add_all:
# Verify we can distribute separately subprojects in the same vcs
# repository as the main project.
subproject_dir = os.path.join(project_dir, 'subprojects', 'samerepo')
self.new_builddir()
self.init(subproject_dir)
self.build('dist')
xz_distfile = os.path.join(self.distdir, 'samerepo-1.0.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
tar = tarfile.open(xz_distfile, "r:xz")
self.assertEqual(sorted(['samerepo-1.0',
'samerepo-1.0/meson.build']),
sorted([i.name for i in tar]))
def test_rpath_uses_ORIGIN(self):
'''
        Test that built targets use $ORIGIN in rpath, which ensures that they
        are relocatable and that builds are reproducible, since the build
        directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
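        # $ORIGIN expands at runtime to the directory containing the binary
        # itself, so the rpath stays valid wherever the build tree is moved.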
testdir = os.path.join(self.common_test_dir, '40 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
                self.assertIsNone(rpath)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '151 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '151 reserved targets')
targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
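        # Compile a single source file to an object file, using MSVC-style or
        # Unix-style flags depending on the compiler's argument syntax.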
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # Build the link command from the detected static linker (rather than
        # hard-coding 'lib' or 'ar') so it matches the toolchain under test.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
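            # Embed an ELF soname so the runtime linker records the library
            # name; Mach-O on macOS uses install names instead.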
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build.
        Also test that it's not a hard error to have unsatisfiable library deps,
        since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
@mock.patch.dict(os.environ)
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
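        # e.g. '-L/usr/with spaces/lib' must survive as a single argument
        # despite the embedded space.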
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
        # pkg-config and pkgconf do not emit the arguments in the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '41 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
    # When running under Travis Mac CI, file updates seem to happen too fast,
    # so the timestamps do not get properly updated. Call this method before
    # file operations in appropriate places to make things work.
def mac_ci_delay(self):
if is_osx() and is_ci():
import time
time.sleep(1)
def test_options_with_choices_changing(self) -> None:
"""Detect when options like arrays or combos have their choices change."""
testdir = Path(os.path.join(self.unit_test_dir, '85 change option choices'))
options1 = str(testdir / 'meson_options.1.txt')
options2 = str(testdir / 'meson_options.2.txt')
# Test that old options are changed to the new defaults if they are not valid
real_options = str(testdir / 'meson_options.txt')
self.addCleanup(os.unlink, real_options)
shutil.copy(options1, real_options)
self.init(str(testdir))
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'b')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'array':
self.assertEqual(item['value'], ['b'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
self.wipe()
self.mac_ci_delay()
# When the old options are valid they should remain
shutil.copy(options1, real_options)
self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'c')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'array':
self.assertEqual(item['value'], ['b', 'c'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'],
cwd=workdir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
        '''Test that locations in meson errors contain correct paths.'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
for l in ['cpp', 'cs', 'd', 'java', 'cuda', 'fortran', 'objc', 'objcpp', 'rust']:
try:
comp = env.detect_compiler_for(l, MachineChoice.HOST)
with tempfile.TemporaryDirectory() as d:
comp.sanity_check(d, env)
langs.append(l)
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in {'c', 'cpp', 'd'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
elif lang in {'java'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '173 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '182 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
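                # Taking the lock a second time while it is held must fail.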
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('WARNING: target links against shared modules. This is not '
'recommended as it is not supported on some platforms')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
            # Static libraries are not linkable with -l with msvc because meson
            # installs them as .a files, which unix_args_to_native will not
            # recognize since it expects libraries to use the .lib extension.
            # For a DLL the import library is installed as .lib. Thus for msvc
            # this test needs to use shared libraries to exercise the path
            # resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert user of library will be rebuild
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
        with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
            self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
        # Just to ensure that we caught the correct error
        if isinstance(cm.exception, subprocess.CalledProcessError):
            self.assertIn('as both', cm.exception.output)
        else:
            self.assertIn('as both', str(cm.exception))
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'static')
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.assertEqual(obj.options[OptionKey('set_sub_opt')].value, True)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
msg = "Option 'foo' must have a value separated by equals sign."
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['-Dfoo'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf('-Dfoo')
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages, because we have no control over which ones will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('set_percent_opt')].value, 'myoption%')
self.wipe()
# Setting a 2nd time the same option should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar',
'-Db_lundef=false', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'bar')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'release')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'thread')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'foo')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'plain')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'address')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '208 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], '0')
self.setconf('-Doptimization=g')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], 'g')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
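        # change_minor=True bumps only the last version component; the stored
        # version then still matches on the first two components, so Meson
        # should keep the configuration instead of regenerating from scratch.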
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '158 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # ``with Path(...)`` does not change directory; chdir explicitly so
        # the wipe really runs from inside the build directory.
        oldcwd = os.getcwd()
        os.chdir(self.builddir)
        try:
            self.init(testdir, extra_args=['--wipe'])
        finally:
            os.chdir(oldcwd)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
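        # The ID has the form '<hash>@@<target name><type suffix>', where the
        # hash is a short digest derived from the target's subdirectory.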
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '34 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '41 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '44 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
{
'descriptive_name': 'subsub',
'name': 'subsub',
'version': 'undefined'
},
{
'descriptive_name': 'subsubsub',
'name': 'subsubsub',
'version': 'undefined'
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '76 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '76 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
dummydir = os.path.join(testdir, 'dummydir.h')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
self.assertNotIn(dummydir, out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
# XXX: These now generate in a different order, is that okay?
self.assertListEqual(sorted(res_nb, key=lambda x: x['name']), sorted(res_wb, key=lambda x: x['name']))
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_buildoptions_cross_only(self):
testdir = os.path.join(self.unit_test_dir, '84 cross only introspect')
testfile = os.path.join(testdir, 'meson.build')
res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
optnames = [o['name'] for o in res]
self.assertIn('c_args', optnames)
self.assertNotIn('build.c_args', optnames)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj, strict: bool = True):
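            # key_type_list entries are (key, expected type). Keys whose
            # expected type includes None are optional and may be absent or
            # null; all other keys must be present with the given type. With
            # strict=True the object may not contain any unlisted keys.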
for i in key_type_list:
                if isinstance(i[1], (list, tuple)) and None in i[1]:
                    i = (i[0], tuple([x for x in i[1] if x is not None]))
                    # Optional key: skip the checks if it is absent or null.
                    if i[0] not in obj or obj[i[0]] is None:
                        continue
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
if strict:
for k in obj.keys():
found = False
for i in key_type_list:
if k == i[0]:
found = True
break
self.assertTrue(found, 'Key "{}" not in expected list'.format(k))
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
('workdir', (str, None)),
('priority', int),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
('choices', (list, None)),
('value', (str, int, bool, list)),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('extra_files', list),
('subproject', (str, None)),
('install_filename', (list, None)),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
            print(json.dumps(i))
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i, strict=False)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
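        # Mutate the in-memory copy to the values the options should have
        # after the setconf calls below; the regenerated intro file must then
        # match it exactly.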
for i in res1:
if i['name'] == 'cpp_std':
i['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
i['value'] = 'c++14'
if i['name'] == 'buildtype':
i['value'] = 'release'
if i['name'] == 'optimization':
i['value'] = '3'
if i['name'] == 'debug':
i['value'] = False
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
            node_counter[n] = node_counter.get(n, 0) + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
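            # Maps each AST node name to its expected attributes: entries are
            # (key, validator) for nested nodes, or (key, None, type) for
            # plain values that are only type-checked.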
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string : bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean : False
Another boolean: True
Some string : Hello World
A list : string
1
True
empty list :
enabled_opt : enabled
A number : 1
yes : YES
no : NO
coma list : a, b, c
Stuff
missing prog : NO
existing prog : ''' + sys.executable + '''
missing dep : NO
internal dep : YES
Plugins
long coma list : alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub : YES
sub2 : NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. check that target actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
        # Target specified in a project with non-unique names
testdir = os.path.join(self.common_test_dir, '186 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
        # Regression test: a spurious reconfigure used to happen when the
        # build directory is inside the source directory.
        # See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
        # During the first configure the file did not exist, so no dependency
        # should have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
        # During the reconfigure the file did exist, but it is inside the
        # build directory, so no dependency should have been set. A rebuild
        # should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '207 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '42 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '226 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
        md_command_section_matches = list(section_pattern.finditer(md))
        md_command_sections = {}
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
        md_commands = set(md_command_sections.keys())
help_output = self._run(self.meson_command + ['--help'])
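        # Extract the command list from the argparse usage string, which has
        # the form 'usage: meson [-h] {setup,configure,...} ...'.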
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'{{ ' + command + r'_usage.inc }}[\r\n]'
r'.*?'
r'{{ ' + command + r'_arguments.inc }}[\r\n]',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def _check_coverage_files(self, types=('text', 'xml', 'html')):
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f))
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '106 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
with temp_filename() as crossfile1, temp_filename() as crossfile2:
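            # Constants accumulate across machine files: crossfile2 can refer
            # to 'compiler' from crossfile1, and '/' joins path segments.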
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '82 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '229 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
'Requires ASM compiler for x86 or x86_64 platform currently only available on Linux CI runners')
def test_nostdlib(self):
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
def test_meson_version_compare(self):
testdir = os.path.join(self.unit_test_dir, '83 meson version compare')
out = self.init(testdir)
self.assertNotRegex(out, r'WARNING')
def test_wrap_redirect(self):
redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
os.makedirs(os.path.dirname(real_wrap))
# Invalid redirect, filename must have .wrap extension
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrapper
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename cannot be in parent directory
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = ../real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename must be in foo/subprojects/real.wrap
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
wrap = PackageDefinition(redirect_wrap)
# Correct redirect
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrap
'''))
with open(real_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = http://invalid
'''))
wrap = PackageDefinition(redirect_wrap)
self.assertEqual(wrap.get('url'), 'http://invalid')
@skip_if_no_cmake
def test_nested_cmake_rebuild(self) -> None:
        # This checks for a bug where, if a non-meson project is used as a
        # third-level (or deeper) subproject, changing that project's build
        # files did not cause a rebuild.
testdir = os.path.join(self.unit_test_dir, '86 nested subproject regenerate depends')
cmakefile = Path(testdir) / 'subprojects' / 'sub2' / 'CMakeLists.txt'
self.init(testdir)
self.build()
with cmakefile.open('a') as f:
os.utime(str(cmakefile))
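        # Touching the nested CMakeLists.txt must trigger a reconfigure on the
        # next build, after which the build itself is a no-op.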
self.assertReconfiguredBuildIsNoop()
def test_version_file(self):
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir)
projinfo = self.introspect('--projectinfo')
self.assertEqual(projinfo['version'], '1.0.0')
def test_cflags_cppflags(self):
envs = {'CPPFLAGS': '-DCPPFLAG',
'CFLAGS': '-DCFLAG',
'CXXFLAGS': '-DCXXFLAG'}
srcdir = os.path.join(self.unit_test_dir, '90 multiple envvars')
self.init(srcdir, override_envvars=envs)
self.build()
def test_build_b_options(self) -> None:
# Currently (0.57) these do nothing, but they've always been allowed
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir, extra_args=['-Dbuild.b_lto=true'])
def test_install_skip_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '91 install skip subprojects')
self.init(testdir)
self.build()
main_expected = [
'',
'share',
'include',
'foo',
'bin',
'share/foo',
'share/foo/foo.dat',
'include/foo.h',
'foo/foofile',
'bin/foo' + exe_suffix,
]
bar_expected = [
'bar',
'share/foo/bar.dat',
'include/bar.h',
'bin/bar' + exe_suffix,
'bar/barfile'
]
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() == 'msvc':
main_expected.append('bin/foo.pdb')
bar_expected.append('bin/bar.pdb')
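        # destdir_join places the absolute install prefix under the DESTDIR
        # staging directory, like 'DESTDIR=... ninja install' would.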
prefix = destdir_join(self.installdir, self.prefix)
main_expected = [Path(prefix, p) for p in main_expected]
bar_expected = [Path(prefix, p) for p in bar_expected]
all_expected = main_expected + bar_expected
def check_installed_files(extra_args, expected):
args = ['install', '--destdir', self.installdir] + extra_args
self._run(self.meson_command + args, workdir=self.builddir)
            all_files = list(Path(self.installdir).rglob('*'))
self.assertEqual(sorted(expected), sorted(all_files))
windows_proof_rmtree(self.installdir)
check_installed_files([], all_expected)
check_installed_files(['--skip-subprojects'], main_expected)
check_installed_files(['--skip-subprojects', 'bar'], main_expected)
check_installed_files(['--skip-subprojects', 'another'], all_expected)
class FailureTests(BasePlatformTests):
'''
    Tests for failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
        an error message matching the regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(boost_root.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
        1. The correct message is output when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. A wrap file from a subproject is used but fails because it does not
contain required keys.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
def test_error_func(self):
self.assertMesonRaises("error('a', 'b', ['c', ['d', {'e': 'f'}]], 'g')",
"Problem encountered: a b \['c', \['d', {'e' : 'f'}\]\] g")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd.exe with args without searching
prog = ExternalProgram('cmd', command=['cmd', '/C'])
self.assertTrue(prog.found(), msg='cmd not found with args')
self.assertPathEqual(prog.get_command()[0], 'cmd')
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# If `.PY` is in PATHEXT, scripts can be found as programs
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
# Finding a script in PATH w/o extension works and adds the interpreter
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script with an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script without an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# Compare against the full set so that this test must be updated
# whenever the set of ignored libs changes, and the set itself is
# tested too.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertIn('prog.pdb', files)
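# Helper shared by the test_link_environment_variable_* tests below:
# point the per-language linker environment variable (e.g. CC_LD for
# 'c') at ``name`` and assert that the detected compiler ends up with
# the expected linker id.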
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link-compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link-compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link-compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = env.detect_d_compiler(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
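# Qt's debug import libraries carry a 'd' suffix (Qt5Cored.lib rather
# than Qt5Core.lib), which is what the pattern below looks for.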
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
'''
Test that the correct VS CRT is used when running compiler checks
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
def sanitycheck_vscrt(vscrt):
checks = self.get_meson_log_sanitychecks()
self.assertTrue(len(checks) > 0)
for check in checks:
self.assertIn(vscrt, check)
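# Expected mapping: debug buildtypes select /MDd, optimized and release
# buildtypes select /MD, and an explicit b_vscrt value overrides the
# buildtype-derived default.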
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=release'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=md'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mt'])
sanitycheck_vscrt('/MT')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mtd'])
sanitycheck_vscrt('/MTd')
def test_modules(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('C++ modules only work with the Ninja backend (not {}).'.format(self.backend.name))
if 'VSCMD_VER' not in os.environ:
raise unittest.SkipTest('C++ modules are only supported with Visual Studio.')
if version_compare(os.environ['VSCMD_VER'], '<16.9.0'):
raise unittest.SkipTest('C++ modules are only supported with VS 2019 Preview or newer.')
self.init(os.path.join(self.unit_test_dir, '87 cpp modules'))
self.build()
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '149 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '105 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
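# get_soname() reports the DT_SONAME entry of the library, i.e. what
# `readelf -d` prints as (SONAME).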
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
self.assertEqual(libhello_nolib.get_pkgconfig_variable('foo', {}), 'bar')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
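# `pkg-config --validate` exits with an error if the generated .pc file
# is malformed, which would make this _run() call fail.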
# pkg-config strips some duplicated flags so we have to parse the
# generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
# See common/45 pkgconfig-gen/meson.build for a description of the case this test covers.
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lsimple1', content)
self.assertIn('Libs.private: -lz', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
self.build()
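# The meson-uninstalled directory in the build tree holds the
# *-uninstalled.pc files that let pkg-config resolve the libraries
# straight from the build directory, before installation.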
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def test_qt6dependency_qmake_detection(self):
'''
Test that qt6 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt6
if not shutil.which('qmake-qt6'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 6' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 6.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt6
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt6 \(modules: Core\) found: YES .* \((qmake|qmake-qt6)\)\n')
def glob_sofiles_without_privdir(self, g):
files = glob(g)
return [f for f in files if not f.endswith('.p')]
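# Excluding Meson's private *.p directories keeps the file counts
# asserted in _test_soname_impl() exact: only the real library and its
# version alias symlinks remain.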
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '37 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '37 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir: str, compiler: 'Compiler') -> None:
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_cpp20 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=10.0.0', None) or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=10.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
key = OptionKey('std', lang=compiler.language)
for v in compiler.get_options()[key].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
elif '++20' in v and not has_cpp20:
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
self.init(testdir, extra_args=[f'-D{key!s}={v}'])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and (compiler.get_id(), v) not in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print(f'{key!s} was {v!r}')
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if compiler.language == 'c':
env_flag_name = 'CFLAGS'
elif compiler.language == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(compiler.language))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars=env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc)
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp)
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir, extra_args='--unity=subprojects')
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
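# This file is installed without an explicit install_mode, so the mode
# of the source file must be preserved.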
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '191 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
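# Each pair is a relative-order constraint: the first flag must appear
# before the second on the link line, whichever of the two reorderings
# the pkg-config implementation produced.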
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skipIfNoPkgconfig
def test_build_rpath_pkgconfig(self):
'''
Test that current build artefacts (libs) are found first on the rpath,
manually specified rpath comes second and additional rpath elements (from
pkg-config files) come last
'''
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '90 pkgconfig build rpath order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz:/foo/dummy')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz:/foo/dummy')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on macOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '81 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
continue
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
tool_path = os.path.join(testdir, 'some_cross_tool.py')
crossfile.write(textwrap.dedent(f'''\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
sometool.py = ['{tool_path}']
someothertool.py = '{tool_path}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
'''))
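# Note that the [binaries] entries above deliberately exercise both the
# list form (sometool.py) and the plain string form (someothertool.py).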
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertNotIn(b'libpkgdep-int', pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertNotIn(b'glib', pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({OptionKey('pkg_config_path'): pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Check that a program can be built against a static library that was
installed to a custom prefix, by pointing PKG_CONFIG_PATH at the
pkg-config file Meson generated for it.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On Windows, Cygwin, macOS, and OpenBSD, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.src_root, 'test cases', 'native', '9 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
        box. On Linux, BSDs, Windows, etc., you need to set extra arguments
        such as LD_LIBRARY_PATH, so the post-install run checks are skipped
        there.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
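        # Note: pointing self.prefix and self.installdir at the same directory
        # lets install(use_destdir=False) produce a self-contained prefix for
        # each project.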
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
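        # The link target ../../nonexistent.txt never resolves, so the install
        # step must copy the symlink itself: islink() must be True while
        # isfile() stays False.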
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('60 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library flag appears exactly once in the
                # line: splitting on it must yield exactly two parts
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py'))))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
        # Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP['{}_ld'.format(lang)]]
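        # For lang='c' this resolves to the CC_LD environment variable,
        # for 'cpp' to CXX_LD, and so on.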
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if isinstance(comp, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler,
mesonbuild.compilers.AppleClangObjCCompiler,
mesonbuild.compilers.AppleClangObjCPPCompiler)):
raise unittest.SkipTest('AppleClang is currently only supported with ld64')
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
@skipIfNoExecutable('ld.gold') # need an additional check here because _check_ld checks for gcc
def test_ld_environment_variable_rust(self):
self._check_ld('gcc', 'gcc -fuse-ld=gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
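        # The primary URLs below point at an invalid server, so meson must
        # fall back to the file:// URLs for both the source and the patch.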
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
        # Write a test project where the first dependency() returns not-found
        # because the 'broken' subproject does not exist, but that should not
        # prevent the 2nd dependency() from looking up the system library.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
testdir = os.path.join(self.unit_test_dir, '78 as link whole')
self.init(testdir)
with open(os.path.join(self.privatedir, 'bar1.pc')) as f:
content = f.read()
self.assertIn('-lfoo', content)
with open(os.path.join(self.privatedir, 'bar2.pc')) as f:
content = f.read()
self.assertNotIn('-lfoo', content)
def test_prelinking(self):
        # Prelinking currently only works on recent GNU toolchains.
# Skip everything else. When support for other toolchains is added,
# remove limitations as necessary.
if is_osx():
raise unittest.SkipTest('Prelinking not supported on Darwin.')
if 'clang' in os.environ.get('CC', 'dummy'):
raise unittest.SkipTest('Prelinking not supported with Clang.')
gccver = subprocess.check_output(['cc', '--version'])
if b'7.5.0' in gccver:
raise unittest.SkipTest('GCC on Bionic is too old to be supported.')
testdir = os.path.join(self.unit_test_dir, '88 prelinking')
self.init(testdir)
self.build()
outlib = os.path.join(self.builddir, 'libprelinked.a')
ar = shutil.which('ar')
self.assertTrue(os.path.exists(outlib))
self.assertTrue(ar is not None)
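        # List the archive members: a prelinked archive should contain a
        # single combined '-prelink.o' object instead of one object per
        # source file.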
p = subprocess.run([ar, 't', outlib],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True, timeout=1)
obj_files = p.stdout.strip().split('\n')
self.assertEqual(len(obj_files), 1)
self.assertTrue(obj_files[0].endswith('-prelink.o'))
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '77 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
self.assertTrue(False, 'Libdir specified on command line gets reset.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def test_run_native_test(self):
'''
https://github.com/mesonbuild/meson/issues/7997
check run native test in crossbuild without exe wrapper
'''
testdir = os.path.join(self.unit_test_dir, '89 run native test')
stamp_file = os.path.join(self.builddir, 'native_test_has_run.stamp')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(stamp_file)
self.run_tests()
self.assertPathExists(stamp_file)
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
error_message = "An exe_wrapper is needed but was not found. Please define one in cross file and check the command and/or add it to PATH."
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
self.assertEqual(str(cm.exception),
"The exe_wrapper defined in the cross file 'broken' was not found. Please check the command and/or add it to PATH.")
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
        # When specifying a known name (python2 / python3), the module will
        # also try 'python' as a fallback and use it if the major version
        # matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine; if it
            # is not, or the python headers can't be found, the test will
            # raise MESON_SKIP_TEST. We could check beforehand what version of
            # python is available, but it's a bit of a chicken-and-egg
            # situation, as that is the job of the module, so we just ask for
            # forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
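        # The rewriter emits its machine-readable JSON on stderr; stdout only
        # carries the human-readable log printed above.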
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
    def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
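        # The rewriter returns the sources naturally sorted: files in
        # subdirectories before top-level files, and numeric components
        # compared as numbers (e.g. a2.c before a10.c).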
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD', 'Boost']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': 'debug=true'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
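
        For example, {'binaries': {'bash': '/usr/bin/bash'}} is written out
        as:

            [binaries]
            bash='/usr/bin/bash'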
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
if isinstance(v, (bool, int, float)):
f.write("{}={}\n".format(k, v))
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v])))
else:
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
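
        ``cb`` receives the detected compiler object and must return a tuple
        of (binary, expected_id): the binary to set in the machine file and
        the compiler id the new detection is expected to report.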
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
        # We may not have python2; check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
                # but we don't support that ATM, so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang: str, binary: str, version_str: str, version: str) -> None:
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
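
        For example, ('vala', 'valac', 'Vala 1.2345', '1.2345') wraps valac so
        that --version prints 'Vala 1.2345' and asserts that the detected
        compiler reports version '1.2345'.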
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = [wrapper]
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = [wrapper]
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '41 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0),
('CaseSenSiTivE', 'SOME other Value'),
('CASESENSITIVE', 'some other Value')]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '80 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '41 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
            # Find werror among the build options
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
        # Ints are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
            # Find unity_size among the build options
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
            self.fail('Did not find cpp_std in build options?')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/foo'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '99 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
            # Test that non-per-subproject options are inherited from the parent
if 'c_args' in each['name']:
                # This path will be hit twice, once for build and once for host.
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
        self.assertEqual(found, 4, 'Did not find all four expected values')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises((RuntimeError, subprocess.CalledProcessError)) as cm:
self.init(testcase, extra_args=['--native-file', config])
        if isinstance(cm.exception, RuntimeError):
            check = str(cm.exception)
        else:
            check = cm.exception.stdout
        self.assertIn('Parent should override default_library', check)
def test_builtin_options_subprojects_dont_inherits_parent_override(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
self.init(testcase, extra_args=['--native-file', config])
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' setting is present, but doesn't have the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
    This is mainly aimed at testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
return textwrap.dedent(f"""\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
exe_wrapper = {str(exe_wrapper) if exe_wrapper is not None else '[]'}
[properties]
needs_exe_wrapper = {needs_exe_wrapper}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""")
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
            # If XDG_DATA_HOME is set in the environment running the
            # tests this test will fail, so mock the environment, pop
            # it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
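    # For illustration (hypothetical values, not part of the test suite):
    # helper_create_cross_file({'binaries': {'c': '/usr/bin/cc'}}) would write
    # a file of the form
    #
    #     [binaries]
    #     c='/usr/bin/cc'
    #
    # and return its path, ready to be passed via --cross-file.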
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
        # This is just a smoke test for the cross file, since the
        # implementation shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
Such options could be passed twice, once for the build machine, and
once for the host machine. I've picked pkg-config path, but any would
do that can be set for both.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser()
return iter(parser.parse(io.StringIO(s)))
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, num_tests=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, num_tests=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
class SubprojectsCommandTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.root_dir = Path(self.builddir)
self.project_dir = self.root_dir / 'src'
self._create_project(self.project_dir)
self.subprojects_dir = self.project_dir / 'subprojects'
os.makedirs(str(self.subprojects_dir))
def _create_project(self, path, project_name='dummy'):
os.makedirs(str(path), exist_ok=True)
with open(str(path / 'meson.build'), 'w') as f:
f.write("project('{}')".format(project_name))
def _git(self, cmd, workdir):
return git(cmd, str(workdir), check=True)[1].strip()
def _git_config(self, workdir):
self._git(['config', 'user.name', 'Meson Test'], workdir)
self._git(['config', 'user.email', 'meson.test@example.com'], workdir)
def _git_remote(self, cmd, name):
return self._git(cmd, self.root_dir / name)
def _git_local(self, cmd, name):
return self._git(cmd, self.subprojects_dir / name)
def _git_local_branch(self, name):
        # Same as `git branch --show-current` but compatible with older git versions
branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
return branch if branch != 'HEAD' else ''
def _git_local_commit(self, name, ref='HEAD'):
return self._git_local(['rev-parse', ref], name)
def _git_remote_commit(self, name, ref='HEAD'):
return self._git_remote(['rev-parse', ref], name)
def _git_create_repo(self, path):
# If a user has git configuration init.defaultBranch set we want to override that
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
self._create_project(path)
self._git(['init'] + extra_cmd, path)
self._git_config(path)
self._git(['add', '.'], path)
self._git(['commit', '-m', 'Initial commit'], path)
def _git_create_remote_repo(self, name):
self._git_create_repo(self.root_dir / name)
def _git_create_local_repo(self, name):
self._git_create_repo(self.subprojects_dir / name)
def _git_create_remote_commit(self, name, branch):
self._git_remote(['checkout', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_branch(self, name, branch):
self._git_remote(['checkout', '-b', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_tag(self, name, tag):
self._git_remote(['commit', '--allow-empty', '-m', 'tag {} commit'.format(tag)], name)
self._git_remote(['tag', tag], name)
def _wrap_create_git(self, name, revision='master'):
path = self.root_dir / name
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-git]
url={}
revision={}
'''.format(os.path.abspath(str(path)), revision)))
def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
path = self.root_dir / tarball
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-file]
source_url={}
'''.format(os.path.abspath(str(path)))))
def _subprojects_cmd(self, args):
return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))
def test_git_update(self):
subp_name = 'sub1'
# Create a fake remote git repository and a wrap file. Checks that
# "meson subprojects download" works.
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
self._subprojects_cmd(['download'])
self.assertPathExists(str(self.subprojects_dir / subp_name))
self._git_config(self.subprojects_dir / subp_name)
        # Create a new remote branch and update the wrap file. Checks that
        # "meson subprojects update --reset" checks out the new branch.
self._git_create_remote_branch(subp_name, 'newbranch')
self._wrap_create_git(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
        # Update remote newbranch. Checks that the new commit is pulled into
        # the existing local newbranch. Make sure it does not print a spurious
        # 'git stash' message.
self._git_create_remote_commit(subp_name, 'newbranch')
out = self._subprojects_cmd(['update', '--reset'])
self.assertNotIn('No local changes to save', out)
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
        # Update remote newbranch and switch to another branch. Checks that it
        # switches the current branch to newbranch and pulls the latest commit.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Stage some local changes then update. Checks that local changes got
# stashed.
self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
self._git_local(['add', '.'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
self.assertTrue(self._git_local(['stash', 'list'], subp_name))
        # Create a new remote tag and update the wrap file. Checks that
        # "meson subprojects update --reset" checks out the new tag in detached mode.
self._git_create_remote_tag(subp_name, 'newtag')
self._wrap_create_git(subp_name, 'newtag')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))
        # Create a new remote commit and update the wrap file with the commit id.
        # Checks that "meson subprojects update --reset" checks out the new
        # commit in detached mode.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
self._wrap_create_git(subp_name, new_commit)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), new_commit)
        # Create a local project not in a git repository, then update it with
        # a git wrap. Without --reset it should print an error message and
        # return failure. With --reset it should delete the existing project
        # and clone the new one.
subp_name = 'sub2'
self._create_project(self.subprojects_dir / subp_name)
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self._subprojects_cmd(['update'])
self.assertIn('Not a git repository', cm.exception.output)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name))
@skipIfNoExecutable('true')
def test_foreach(self):
self._create_project(self.subprojects_dir / 'sub_file')
self._wrap_create_file('sub_file')
self._git_create_local_repo('sub_git')
self._wrap_create_git('sub_git')
self._git_create_local_repo('sub_git_no_wrap')
def ran_in(s):
ret = []
prefix = 'Executing command in '
for l in s.splitlines():
if l.startswith(prefix):
ret.append(l[len(prefix):])
return sorted(ret)
dummy_cmd = ['true']
out = self._subprojects_cmd(['foreach'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_file'])
out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_git'])
def _clang_at_least(compiler: 'Compiler', minver: str, apple_minver: T.Optional[str]) -> bool:
"""
    Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
    apple_minver: str, optional
        AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
if apple_minver is None:
return False
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
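# For orientation, a hedged sketch of how this guard behaves (hypothetical
# version bounds, using the '>= x.y' condition syntax that version_compare()
# accepts elsewhere in this file):
#
#     _clang_at_least(cc, '>=10.0.0', '>=12.0.0')
#
# is True for regular Clang >= 10 and AppleClang >= 12, and always False for
# AppleClang when apple_minver is None.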
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.CFLAGS_MAPPING.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
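# A quick sketch of the mapping this performs (hypothetical arguments, shown
# for clarity only):
#
#     convert_args(['-f', 'CrossFileTests.test_user_options'])
#     # -> ['--exitfirst', '-k', 'CrossFileTests and test_user_options']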
def running_single_tests(argv, cases):
'''
Check whether we only got arguments for running individual tests, not
entire testcases, and not all testcases (no test args).
'''
got_test_arg = False
for arg in argv:
if arg.startswith('-'):
continue
for case in cases:
if not arg.startswith(case):
continue
if '.' not in arg:
# Got a testcase, done
return False
got_test_arg = True
return got_test_arg
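# For example (hypothetical selectors): ['AllPlatformTests.test_foo'] yields
# True, because only an individual test was requested, while
# ['AllPlatformTests'] yields False, because a whole testcase was named.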
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests', 'SubprojectsCommandTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = []
        # Don't use pytest-xdist when running single unit tests, since it
        # wastes time spawning many worker processes just to distribute a
        # handful of tests.
if not running_single_tests(sys.argv, cases):
pytest_args += ['-n', 'auto']
pytest_args += ['./run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
        print('pytest or pytest-xdist not found, falling back to unittest')
# Fallback to plain unittest.
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
start = time.monotonic()
try:
raise SystemExit(main())
finally:
print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Utility functions to draw ASCII diagrams to the command line."""
from aiida.common.links import LinkType
__all__ = ('draw_children', 'draw_parents', 'format_call_graph')
TREE_LAST_ENTRY = '\u2514\u2500\u2500 '
TREE_MIDDLE_ENTRY = '\u251C\u2500\u2500 '
TREE_FIRST_ENTRY = TREE_MIDDLE_ENTRY
class NodeTreePrinter:
"""Utility functions for printing node trees.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
"""
    # Note: when removing this code, also remove `ete3` as a dependency, as it will no longer be used.
@classmethod
def print_node_tree(cls, node, max_depth, follow_links=()):
"""Top-level function for printing node tree."""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('class is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
from ete3 import Tree
from aiida.cmdline.utils.common import get_node_summary
from aiida.cmdline.utils import echo
echo.echo(get_node_summary(node))
tree_string = f'({cls._build_tree(node, max_depth=max_depth, follow_links=follow_links)});'
tmp = Tree(tree_string, format=1)
echo.echo(tmp.get_ascii(show_internal=True))
@staticmethod
def _ctime(link_triple):
return link_triple.node.ctime
@classmethod
def _build_tree(cls, node, show_pk=True, max_depth=None, follow_links=(), depth=0):
"""Return string with tree."""
if max_depth is not None and depth > max_depth:
return None
children = []
for entry in sorted(node.get_outgoing(link_type=follow_links).all(), key=cls._ctime):
child_str = cls._build_tree(
entry.node, show_pk, follow_links=follow_links, max_depth=max_depth, depth=depth + 1
)
if child_str:
children.append(child_str)
out_values = []
if children:
out_values.append('(')
out_values.append(', '.join(children))
out_values.append(')')
lab = node.__class__.__name__
if show_pk:
lab += f' [{node.pk}]'
out_values.append(lab)
return ''.join(out_values)
def draw_parents(node, node_label=None, show_pk=True, dist=2, follow_links_of_type=None):
"""
Print an ASCII tree of the parents of the given node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to draw for
:type node: :class:`aiida.orm.nodes.data.Data`
:param node_label: The label to use for the nodes
:type node_label: str
:param show_pk: Show the PK of nodes alongside the label
:type show_pk: bool
:param dist: The number of steps away from this node to branch out
:type dist: int
:param follow_links_of_type: Follow links of this type when making steps,
if None then it will follow CREATE and INPUT links
:type follow_links_of_type: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
return get_ascii_tree(node, node_label, show_pk, dist, follow_links_of_type, False)
def draw_children(node, node_label=None, show_pk=True, dist=2, follow_links_of_type=None):
"""
    Print an ASCII tree of the children of the given node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to draw for
:type node: :class:`aiida.orm.nodes.data.Data`
:param node_label: The label to use for the nodes
:type node_label: str
:param show_pk: Show the PK of nodes alongside the label
:type show_pk: bool
:param dist: The number of steps away from this node to branch out
:type dist: int
:param follow_links_of_type: Follow links of this type when making steps,
if None then it will follow CREATE and INPUT links
:type follow_links_of_type: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
return get_ascii_tree(node, node_label, show_pk, dist, follow_links_of_type, True)
def get_ascii_tree(node, node_label=None, show_pk=True, max_depth=1, follow_links_of_type=None, descend=True):
"""
Get a string representing an ASCII tree for the given node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to get the tree for
:type node: :class:`aiida.orm.nodes.node.Node`
:param node_label: What to label the nodes with (can be an attribute name)
:type node_label: str
:param show_pk: If True, show the pk with the node label
:type show_pk: bool
:param max_depth: The maximum depth to follow starting from the node
:type max_depth: int
:param follow_links_of_type: Follow links of a given type, can be None
:type follow_links_of_type: One of the members from
:class:`aiida.common.links.LinkType`
:param descend: if True will follow outputs, if False inputs
:type descend: bool
:return: The string giving an ASCII representation of the tree from the node
:rtype: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
from ete3 import Tree
tree_string = build_tree(node, node_label, show_pk, max_depth, follow_links_of_type, descend)
tree = Tree(f'({tree_string});', format=1)
return tree.get_ascii(show_internal=True)
def build_tree(node, node_label=None, show_pk=True, max_depth=1, follow_links_of_type=None, descend=True, depth=0):
"""
Recursively build an ASCII string representation of the node tree
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to get the tree for
:type node: :class:`aiida.orm.nodes.node.Node`
:param node_label: What to label the nodes with (can be an attribute name)
:type node_label: str
:param show_pk: If True, show the pk with the node label
:type show_pk: bool
:param max_depth: The maximum depth to follow starting from the node
:type max_depth: int
:param follow_links_of_type: Follow links of a given type, can be None
:type follow_links_of_type: One of the members from
:class:`aiida.common.links.LinkType`
:param descend: if True will follow outputs, if False inputs
:type descend: bool
:param depth: the current depth
:type depth: int
:return: The string giving an ASCII representation of the tree from the node
:rtype: str
"""
# pylint: disable=too-many-arguments
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
out_values = []
if depth < max_depth:
relatives = []
if descend:
outputs = node.get_outgoing(link_type=follow_links_of_type).all_nodes()
else: # ascend
if follow_links_of_type is None:
follow_links_of_type = (LinkType.CREATE, LinkType.INPUT_CALC, LinkType.INPUT_WORK)
outputs = node.get_incoming(link_type=follow_links_of_type).all_nodes()
for child in sorted(outputs, key=lambda node: node.ctime):
relatives.append(
build_tree(child, node_label, show_pk, max_depth, follow_links_of_type, descend, depth + 1)
)
if relatives:
out_values.append(f"({", ".join(relatives)})")
out_values.append(_generate_node_label(node, node_label, show_pk))
return ''.join(out_values)
def _generate_node_label(node, node_attr, show_pk):
"""
Generate a label for the node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to generate the label for
:type node: :class:`aiida.orm.nodes.node.Node`
:param node_attr: The attribute to use as the label, can be None
:type node_attr: str
:param show_pk: if True, show the PK alongside the label
:type show_pk: bool
:return: The generated label
:rtype: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
label = None
if node_attr is None:
try:
label = node.process_label
except AttributeError:
label = None
else:
try:
label = str(getattr(node, node_attr))
except AttributeError:
try:
label = node.get_attribute(node_attr)
except AttributeError:
pass
# Couldn't find one, so just use the class name
if label is None:
label = node.__class__.__name__
if show_pk:
label += f' [{node.pk}]'
return label
def calc_info(node):
"""Return a string with the summary of the state of a CalculationNode."""
from aiida.orm import ProcessNode, WorkChainNode
if not isinstance(node, ProcessNode):
raise TypeError(f'Unknown type: {type(node)}')
process_label = node.process_label
process_state = node.process_state.value.capitalize()
exit_status = node.exit_status
if exit_status is not None:
string = f'{process_label}<{node.pk}> {process_state} [{exit_status}]'
else:
string = f'{process_label}<{node.pk}> {process_state}'
if isinstance(node, WorkChainNode) and node.stepper_state_info:
string += f' [{node.stepper_state_info}]'
return string
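# A sketch of the strings this produces (hypothetical process label and pk):
#
#     calc_info(node)  # -> 'ArithmeticAddCalculation<42> Finished [0]'
#
# The exit status bracket is omitted while the process has no exit status yet.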
def format_call_graph(calc_node, info_fn=calc_info):
"""
    Format a tree, like the POSIX tree command, for the calculation call graph.
:param calc_node: The calculation node
:param info_fn: An optional function that takes the node and returns a string
of information to be displayed for each node.
"""
call_tree = build_call_graph(calc_node, info_fn=info_fn)
return format_tree_descending(call_tree)
def build_call_graph(calc_node, info_fn=calc_info):
"""Build the call graph of a given node."""
info_string = info_fn(calc_node)
called = calc_node.called
called.sort(key=lambda x: x.ctime)
if called:
return info_string, [build_call_graph(child, info_fn) for child in called]
return info_string
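# The returned structure nests (info, children) tuples; for a hypothetical
# workchain W<10> that called two finished calculations it would look like
#
#     ('W<10> Finished [0]', ['C<11> Finished [0]', 'C<12> Finished [0]'])
#
# where childless nodes collapse to their plain info string.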
def format_tree_descending(tree, prefix='', pos=-1):
"""Format a descending tree."""
# pylint: disable=too-many-branches
text = []
if isinstance(tree, tuple):
info = tree[0]
else:
info = tree
if pos == -1:
pre = ''
elif pos == 0:
pre = f'{prefix}{TREE_FIRST_ENTRY}'
elif pos == 1:
pre = f'{prefix}{TREE_MIDDLE_ENTRY}'
else:
pre = f'{prefix}{TREE_LAST_ENTRY}'
text.append(f'{pre}{info}')
if isinstance(tree, tuple):
_, value = tree
num_entries = len(value)
if pos in [-1, 2]:
new_prefix = f'{prefix} '
else:
new_prefix = f'{prefix}│ '
for i, entry in enumerate(value):
if i == num_entries - 1:
pos = 2
elif i == 0:
pos = 0
else:
pos = 1
text.append(format_tree_descending(entry, new_prefix, pos))
return '\n'.join(text)
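# A minimal sketch of the rendering, assuming the hypothetical call graph
# sketched above (the exact indentation depends on the per-level prefixes):
#
#     W<10> Finished [0]
#         ├── C<11> Finished [0]
#         └── C<12> Finished [0]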
| # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Utility functions to draw ASCII diagrams to the command line."""
from aiida.common.links import LinkType
__all__ = ('draw_children', 'draw_parents', 'format_call_graph')
TREE_LAST_ENTRY = '\u2514\u2500\u2500 '
TREE_MIDDLE_ENTRY = '\u251C\u2500\u2500 '
TREE_FIRST_ENTRY = TREE_MIDDLE_ENTRY
class NodeTreePrinter:
"""Utility functions for printing node trees.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
"""
# Note: when removing this code, also remove the `ete3` as a dependency as it will no longer be used.
@classmethod
def print_node_tree(cls, node, max_depth, follow_links=()):
"""Top-level function for printing node tree."""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('class is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
from ete3 import Tree
from aiida.cmdline.utils.common import get_node_summary
from aiida.cmdline.utils import echo
echo.echo(get_node_summary(node))
tree_string = f'({cls._build_tree(node, max_depth=max_depth, follow_links=follow_links)});'
tmp = Tree(tree_string, format=1)
echo.echo(tmp.get_ascii(show_internal=True))
@staticmethod
def _ctime(link_triple):
return link_triple.node.ctime
@classmethod
def _build_tree(cls, node, show_pk=True, max_depth=None, follow_links=(), depth=0):
"""Return string with tree."""
if max_depth is not None and depth > max_depth:
return None
children = []
for entry in sorted(node.get_outgoing(link_type=follow_links).all(), key=cls._ctime):
child_str = cls._build_tree(
entry.node, show_pk, follow_links=follow_links, max_depth=max_depth, depth=depth + 1
)
if child_str:
children.append(child_str)
out_values = []
if children:
out_values.append('(')
out_values.append(', '.join(children))
out_values.append(')')
lab = node.__class__.__name__
if show_pk:
lab += f' [{node.pk}]'
out_values.append(lab)
return ''.join(out_values)
def draw_parents(node, node_label=None, show_pk=True, dist=2, follow_links_of_type=None):
"""
Print an ASCII tree of the parents of the given node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to draw for
:type node: :class:`aiida.orm.nodes.data.Data`
:param node_label: The label to use for the nodes
:type node_label: str
:param show_pk: Show the PK of nodes alongside the label
:type show_pk: bool
:param dist: The number of steps away from this node to branch out
:type dist: int
:param follow_links_of_type: Follow links of this type when making steps,
if None then it will follow CREATE and INPUT links
:type follow_links_of_type: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
return get_ascii_tree(node, node_label, show_pk, dist, follow_links_of_type, False)
def draw_children(node, node_label=None, show_pk=True, dist=2, follow_links_of_type=None):
"""
    Print an ASCII tree of the children of the given node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to draw for
:type node: :class:`aiida.orm.nodes.data.Data`
:param node_label: The label to use for the nodes
:type node_label: str
:param show_pk: Show the PK of nodes alongside the label
:type show_pk: bool
:param dist: The number of steps away from this node to branch out
:type dist: int
:param follow_links_of_type: Follow links of this type when making steps,
if None then it will follow CREATE and INPUT links
:type follow_links_of_type: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
return get_ascii_tree(node, node_label, show_pk, dist, follow_links_of_type, True)
def get_ascii_tree(node, node_label=None, show_pk=True, max_depth=1, follow_links_of_type=None, descend=True):
"""
Get a string representing an ASCII tree for the given node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to get the tree for
:type node: :class:`aiida.orm.nodes.node.Node`
:param node_label: What to label the nodes with (can be an attribute name)
:type node_label: str
:param show_pk: If True, show the pk with the node label
:type show_pk: bool
:param max_depth: The maximum depth to follow starting from the node
:type max_depth: int
:param follow_links_of_type: Follow links of a given type, can be None
:type follow_links_of_type: One of the members from
:class:`aiida.common.links.LinkType`
:param descend: if True will follow outputs, if False inputs
:type descend: bool
:return: The string giving an ASCII representation of the tree from the node
:rtype: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
from ete3 import Tree
tree_string = build_tree(node, node_label, show_pk, max_depth, follow_links_of_type, descend)
tree = Tree(f'({tree_string});', format=1)
return tree.get_ascii(show_internal=True)
def build_tree(node, node_label=None, show_pk=True, max_depth=1, follow_links_of_type=None, descend=True, depth=0):
"""
Recursively build an ASCII string representation of the node tree
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to get the tree for
:type node: :class:`aiida.orm.nodes.node.Node`
:param node_label: What to label the nodes with (can be an attribute name)
:type node_label: str
:param show_pk: If True, show the pk with the node label
:type show_pk: bool
:param max_depth: The maximum depth to follow starting from the node
:type max_depth: int
:param follow_links_of_type: Follow links of a given type, can be None
:type follow_links_of_type: One of the members from
:class:`aiida.common.links.LinkType`
:param descend: if True will follow outputs, if False inputs
:type descend: bool
:param depth: the current depth
:type depth: int
:return: The string giving an ASCII representation of the tree from the node
:rtype: str
"""
# pylint: disable=too-many-arguments
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
out_values = []
if depth < max_depth:
relatives = []
if descend:
outputs = node.get_outgoing(link_type=follow_links_of_type).all_nodes()
else: # ascend
if follow_links_of_type is None:
follow_links_of_type = (LinkType.CREATE, LinkType.INPUT_CALC, LinkType.INPUT_WORK)
outputs = node.get_incoming(link_type=follow_links_of_type).all_nodes()
for child in sorted(outputs, key=lambda node: node.ctime):
relatives.append(
build_tree(child, node_label, show_pk, max_depth, follow_links_of_type, descend, depth + 1)
)
if relatives:
out_values.append(f"({', '.join(relatives)})")
out_values.append(_generate_node_label(node, node_label, show_pk))
return ''.join(out_values)
def _generate_node_label(node, node_attr, show_pk):
"""
Generate a label for the node.
.. deprecated:: 1.1.0
Will be removed in `v2.0.0`.
:param node: The node to generate the label for
:type node: :class:`aiida.orm.nodes.node.Node`
:param node_attr: The attribute to use as the label, can be None
:type node_attr: str
:param show_pk: if True, show the PK alongside the label
:type show_pk: bool
:return: The generated label
:rtype: str
"""
import warnings
from aiida.common.warnings import AiidaDeprecationWarning
warnings.warn('function is deprecated and will be removed in `aiida-core==2.0.0`.', AiidaDeprecationWarning) # pylint: disable=no-member
label = None
if node_attr is None:
try:
label = node.process_label
except AttributeError:
label = None
else:
try:
label = str(getattr(node, node_attr))
except AttributeError:
try:
label = node.get_attribute(node_attr)
except AttributeError:
pass
# Couldn't find one, so just use the class name
if label is None:
label = node.__class__.__name__
if show_pk:
label += f' [{node.pk}]'
return label
def calc_info(node):
"""Return a string with the summary of the state of a CalculationNode."""
from aiida.orm import ProcessNode, WorkChainNode
if not isinstance(node, ProcessNode):
raise TypeError(f'Unknown type: {type(node)}')
process_label = node.process_label
process_state = node.process_state.value.capitalize()
exit_status = node.exit_status
if exit_status is not None:
string = f'{process_label}<{node.pk}> {process_state} [{exit_status}]'
else:
string = f'{process_label}<{node.pk}> {process_state}'
if isinstance(node, WorkChainNode) and node.stepper_state_info:
string += f' [{node.stepper_state_info}]'
return string
def format_call_graph(calc_node, info_fn=calc_info):
"""
Print a tree like the POSIX tree command for the calculation call graph
:param calc_node: The calculation node
:param info_fn: An optional function that takes the node and returns a string
of information to be displayed for each node.
"""
call_tree = build_call_graph(calc_node, info_fn=info_fn)
return format_tree_descending(call_tree)
def build_call_graph(calc_node, info_fn=calc_info):
"""Build the call graph of a given node."""
info_string = info_fn(calc_node)
called = calc_node.called
called.sort(key=lambda x: x.ctime)
if called:
return info_string, [build_call_graph(child, info_fn) for child in called]
return info_string
def format_tree_descending(tree, prefix='', pos=-1):
"""Format a descending tree."""
# pylint: disable=too-many-branches
text = []
if isinstance(tree, tuple):
info = tree[0]
else:
info = tree
if pos == -1:
pre = ''
elif pos == 0:
pre = f'{prefix}{TREE_FIRST_ENTRY}'
elif pos == 1:
pre = f'{prefix}{TREE_MIDDLE_ENTRY}'
else:
pre = f'{prefix}{TREE_LAST_ENTRY}'
text.append(f'{pre}{info}')
if isinstance(tree, tuple):
_, value = tree
num_entries = len(value)
if pos in [-1, 2]:
new_prefix = f'{prefix} '
else:
new_prefix = f'{prefix}│ '
for i, entry in enumerate(value):
if i == num_entries - 1:
pos = 2
elif i == 0:
pos = 0
else:
pos = 1
text.append(format_tree_descending(entry, new_prefix, pos))
return '\n'.join(text)
|
import os
import easypost
# Builds a file containing every cURL request to add a Carrier Account via EasyPost
# USAGE: API_KEY=123... venv/bin/python build_carrier_curl_requests.py > carrier_curl_requests.sh
URL = os.getenv('URL', 'https://api.easypost.com/v2')
API_KEY = os.getenv('API_KEY')
def main():
carrier_types = get_carrier_types()
# TODO: this may have a side effect of ordering the items inside each object too
for carrier in sorted(carrier_types, key=lambda x: x['type']):
curl_request = build_carrier_curl_request(carrier)
print(curl_request)
def get_carrier_types():
"""Get the carrier_types from the EasyPost API."""
easypost.api_key = API_KEY
easypost.api_base = URL
carrier_accounts = easypost.CarrierAccount.types()
return carrier_accounts
def build_carrier_curl_request(carrier):
"""Builds a cURL request for a carrier via EasyPost."""
fedex_custom_workflow_carriers = ['FedexAccount', 'FedexSmartpostAccount']
ups_custom_workflow_carriers = ['UpsAccount', 'UpsDapAccount']
canadapost_custom_workflow_carriers = ['CanadaPostAccount'] # noqa
# Add carrier account title comment
    carrier_output = f'# {carrier.get("type")}\n'
# Add curl command and registration url
    if carrier.get('type') in fedex_custom_workflow_carriers + ups_custom_workflow_carriers:
carrier_output += 'curl -X POST https://api.easypost.com/v2/carrier_accounts/register \\\n'
else:
carrier_output += 'curl -X POST https://api.easypost.com/v2/carrier_accounts \\\n'
# Add authentication, carrier account type and description
carrier_output += "-u 'API_KEY': \\\n"
carrier_output += f"-d 'carrier_account[type]={carrier.get("type")}' \\\n"
carrier_output += f"-d 'carrier_account[description]={carrier.get("type")}' \\\n"
# Iterate over the carrier fields and print the credential structure
carrier_fields = carrier.get('fields').to_dict()
if carrier.get('type') in fedex_custom_workflow_carriers:
for category in carrier_fields['creation_fields']:
for item in carrier_fields['creation_fields'][category]:
carrier_output += f"-d 'carrier_account[registration_data][{item}]=VALUE' \\\n"
carrier_output += '| json_pp\n'
    elif carrier.get('type') in ups_custom_workflow_carriers + canadapost_custom_workflow_carriers:
# TODO: Fix UPS carrier account
# TODO: Fix CanadaPost carrier account
pass
else:
end = '| json_pp\n'
for top_level in carrier_fields:
# If there is a custom_workflow such as 3rd party auth or a similar flow
# we should warn about that here. The credential structure will differ from
# a normal carrier account and is currently not automated
if top_level == 'custom_workflow':
end += '## REQUIRES CUSTOM WORKFLOW ##\n'
else:
for item in carrier_fields[top_level]:
carrier_output += f"-d 'carrier_account[{top_level}][{item}]=VALUE' \\\n"
carrier_output += end
return carrier_output
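# Illustrative output for a simple carrier type (hypothetical type name and
# credential field, for orientation only):
#
#     # ExampleAccount
#     curl -X POST https://api.easypost.com/v2/carrier_accounts \
#     -u 'API_KEY': \
#     -d 'carrier_account[type]=ExampleAccount' \
#     -d 'carrier_account[description]=ExampleAccount' \
#     -d 'carrier_account[credentials][account_number]=VALUE' \
#     | json_pp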
if __name__ == '__main__':
main()
| import os
import easypost
# Builds a file containing every cURL request to add a Carrier Account via EasyPost
# USAGE: API_KEY=123... venv/bin/python build_carrier_curl_requests.py > carrier_curl_requests.sh
URL = os.getenv('URL', 'https://api.easypost.com/v2')
API_KEY = os.getenv('API_KEY')
def main():
carrier_types = get_carrier_types()
# TODO: this may have a side effect of ordering the items inside each object too
for carrier in sorted(carrier_types, key=lambda x: x['type']):
curl_request = build_carrier_curl_request(carrier)
print(curl_request)
def get_carrier_types():
"""Get the carrier_types from the EasyPost API."""
easypost.api_key = API_KEY
easypost.api_base = URL
carrier_accounts = easypost.CarrierAccount.types()
return carrier_accounts
def build_carrier_curl_request(carrier):
"""Builds a cURL request for a carrier via EasyPost."""
fedex_custom_workflow_carriers = ['FedexAccount', 'FedexSmartpostAccount']
ups_custom_workflow_carriers = ['UpsAccount', 'UpsDapAccount']
canadapost_custom_workflow_carriers = ['CanadaPostAccount'] # noqa
# Add carrier account title comment
carrier_output = f'# {carrier.get("type")}\n'
# Add curl command and registration url
    if carrier.get('type') in fedex_custom_workflow_carriers + ups_custom_workflow_carriers:
carrier_output += 'curl -X POST https://api.easypost.com/v2/carrier_accounts/register \\\n'
else:
carrier_output += 'curl -X POST https://api.easypost.com/v2/carrier_accounts \\\n'
# Add authentication, carrier account type and description
carrier_output += "-u 'API_KEY': \\\n"
carrier_output += f"-d 'carrier_account[type]={carrier.get('type')}' \\\n"
carrier_output += f"-d 'carrier_account[description]={carrier.get('type')}' \\\n"
# Iterate over the carrier fields and print the credential structure
carrier_fields = carrier.get('fields').to_dict()
if carrier.get('type') in fedex_custom_workflow_carriers:
for category in carrier_fields['creation_fields']:
for item in carrier_fields['creation_fields'][category]:
carrier_output += f"-d 'carrier_account[registration_data][{item}]=VALUE' \\\n"
carrier_output += '| json_pp\n'
    elif carrier.get('type') in ups_custom_workflow_carriers + canadapost_custom_workflow_carriers:
# TODO: Fix UPS carrier account
# TODO: Fix CanadaPost carrier account
pass
else:
end = '| json_pp\n'
for top_level in carrier_fields:
# If there is a custom_workflow such as 3rd party auth or a similar flow
# we should warn about that here. The credential structure will differ from
# a normal carrier account and is currently not automated
if top_level == 'custom_workflow':
end += '## REQUIRES CUSTOM WORKFLOW ##\n'
else:
for item in carrier_fields[top_level]:
carrier_output += f"-d 'carrier_account[{top_level}][{item}]=VALUE' \\\n"
carrier_output += end
return carrier_output
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import threading
import maya
from auto_everything.base import IO
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
import os
import time
"""
export master_user_id=
export telegram_bot_token=
"""
master_user_id = int(os.getenv("master_user_id", "131513300"))
TOKEN = os.getenv("telegram_bot_token", "")
print(master_user_id)
print(TOKEN)
io = IO()
lock = threading.RLock()
ROOT_DIR = "."
logging.basicConfig(filename=os.path.join(ROOT_DIR, "__main.log"),
level=logging.DEBUG, filemode='w', format='%(levelname)s - %(message)s')
data = {
"question": "1+1=? (不回答会被踢出群)",
"answer": [
"2",
"3",
],
"right_answer_index": 1
}
data = io.read_settings("data", data)
waitting_for_master = False
people = {
# chat_id 324253: "maya timestring"
}
people = io.read_settings("people", people)
historical_message = {
"id_list": []
}
historical_message = io.read_settings(
"historical_message", historical_message)
def handle_useless_msg(bot, update, new_msg_id_list):
global historical_message
chat_type = update.message.chat.type
if "group" in chat_type:
logging.debug(f"new_msg_id_list: {new_msg_id_list}")
logging.debug(f"historical_message: {historical_message["id_list"]}")
lock.acquire()
historical_message.update(
{"id_list": historical_message["id_list"] + new_msg_id_list}
)
io.write_settings("historical_message", historical_message)
lock.release()
logging.debug(
f"new_historical_message: {historical_message["id_list"]}")
def clearn(bot, update):
chat_type = update.message.chat.type
if "group" in chat_type:
if len(historical_message["id_list"]) > 0:
lock.acquire()
for msg_id in historical_message["id_list"]:
try:
bot.delete_message(update.message.chat_id, msg_id)
except Exception as e:
print(e)
historical_message["id_list"] = []
io.write_settings("historical_message", historical_message)
lock.release()
bot.delete_message(update.message.chat_id, update.message.message_id)
def set(bot, update):
global waitting_for_master
if update.message.from_user.id != master_user_id:
Message = update.message.reply_text(
f"You are not admin!\nAdmin is @yingshaoxo ({master_user_id})\n\nYour user_id is:\n{str(update.message.from_user.id)}")
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
else:
Message1 = update.message.reply_text(
f"What's your question? \n\nExample:")
Message2 = update.message.reply_text(
f"you + me = ?\nNone\nWe\n2")
handle_useless_msg(
bot, update, [update.message.message_id, Message1.message_id, Message2.message_id])
if waitting_for_master == False:
waitting_for_master = True
else:
pass
def handle_text_msg(bot, update):
global waitting_for_master
global io
chat_type = update.message.chat.type
logging.debug(f"chat_type is {chat_type}")
if update.message.from_user.id != master_user_id:
if "group" in chat_type:
kick_them_out_if_possible(bot, update)
else:
if "group" in chat_type:
kick_them_out_if_possible(bot, update)
if waitting_for_master == True:
try:
text = update.message.text
text = text.strip()
lines = text.split("\n")
lines = [line.strip() for line in lines if line.strip() != ""]
question = lines[0]
answer = lines[1:-1]
index = int(lines[-1])
if index > len(answer):
Message = update.message.reply_text(
f"The last line should be less than or equal to {len(answer)}")
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
raise Exception
lock.acquire()
new_data = {
"question": question + " (不回答会被踢出群)",
"answer": answer,
"right_answer_index": index
}
data.update(new_data)
io.write_settings("data", data)
lock.release()
waitting_for_master = False
Message = update.message.reply_text(
f"OK, I got it!\n\nQuestion: {question}\nAnswer: {answer[index-1]}")
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
except Exception as e:
Message1 = update.message.reply_text(
f"I got this error: {e} \n Can you try again?\n\nExample:")
Message2 = update.message.reply_text(
f"you + me = ?\nNone\nWe\n2")
handle_useless_msg(
bot, update, [Message1.message_id, Message2.message_id])
def handle_all_msg(bot, update):
new_members = update.message.new_chat_members
if new_members:
lock.acquire()
for user in new_members:
people.update({
user.id: str(maya.now())
})
print(f"{user.id} came to this group")
io.write_settings("people", people)
ask(bot, update)
lock.release()
#left_member = update.message.left_chat_member
kick_them_out_if_possible(bot, update)
handle_useless_msg(bot, update, [update.message.message_id])
def kick_them_out_if_possible(bot, update):
if people != {}:
lock.acquire()
kicked_people = []
for user_id in people:
past = maya.parse(people[user_id])
now = maya.now()
time_passed = (now - past).total_seconds()  # .seconds alone wraps around after one day
logging.debug(
f"how long {user_id} hasn't sent a message: {str(time_passed)}")
if (time_passed > 60 * 3): # I will give you 3 minutes to answer my question
print(f"{user_id} has to be kicked out")
result = bot.kick_chat_member(update.message.chat_id, user_id)
if result == True:
kicked_people.append(user_id)
else:
bot.leave_chat(update.message.chat_id)
for user_id in kicked_people:
del people[user_id]
io.write_settings("people", people)
lock.release()
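# Note: kick_chat_member (like delete_message and pin_chat_message elsewhere in
# this script) only works when the bot is an administrator of the group chat.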
def ask(bot, update):
chat_type = update.message.chat.type
if "group" in chat_type:
keyboard = []
for text in data["answer"]:
keyboard.append(
[InlineKeyboardButton(text, callback_data=text)]
)
reply_markup = InlineKeyboardMarkup(keyboard)
Message = update.message.reply_text(
data["question"], reply_markup=reply_markup)
bot.pin_chat_message(update.message.chat_id,
Message.message_id, disable_notification=True)
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
else:
Message = update.message.reply_text(
f"You are not admin!\nAdmin is @yingshaoxo ({master_user_id})\n\nYour user_id is:\n{str(update.message.from_user.id)}")
def button(bot, update):
query = update.callback_query
right_answer = data['answer'][data["right_answer_index"]-1]
if query.data == right_answer:
user_id = query.from_user.id
if user_id in people:
lock.acquire()
del people[user_id]
io.write_settings("people", people)
lock.release()
Message = bot.send_message(
chat_id=query.message.chat_id, text="You're right.\n\nWelcome!")
time.sleep(3)
Message.delete()
else:
try:
if query.from_user.id in people.keys():
kicked_people = []
result = bot.kick_chat_member(
query.message.chat_id, query.from_user.id)
if result == True:
lock.acquire()
kicked_people.append(query.from_user.id)
for user_id in kicked_people:
del people[user_id]
io.write_settings("people", people)
lock.release()
else:
bot.leave_chat(query.message.chat_id)
except Exception as e:
print(e)
def error(bot, update, error):
"""Log Errors caused by Updates."""
print(f"{error}")
def main():
# Create the Updater and pass it your bot's token.
updater = Updater(TOKEN)
updater.dispatcher.add_handler(CommandHandler('set', set))
updater.dispatcher.add_handler(CommandHandler('ask', ask))
updater.dispatcher.add_handler(CommandHandler('clearn', clearn))
updater.dispatcher.add_handler(CallbackQueryHandler(button))
updater.dispatcher.add_handler(
MessageHandler(Filters.text, handle_text_msg))
updater.dispatcher.add_handler(
MessageHandler(Filters.all, handle_all_msg))
updater.dispatcher.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until the user presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT
updater.idle()
if __name__ == '__main__':
main()
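# Run sketch -- assumes the python-telegram-bot v12-style API used above,
# where handlers receive (bot, update); the file name is illustrative:
#
# $ export master_user_id=123456        # your own Telegram user id
# $ export telegram_bot_token=<token>   # obtained from @BotFather
# $ python bot.py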
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import threading
import maya
from auto_everything.base import IO
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
import os
import time
"""
export master_user_id=
export telegram_bot_token=
"""
master_user_id = int(os.getenv("master_user_id", "131513300"))
TOKEN = os.getenv("telegram_bot_token", "")
print(master_user_id)
print(TOKEN)
io = IO()
lock = threading.RLock()
ROOT_DIR = "."
logging.basicConfig(filename=os.path.join(ROOT_DIR, "__main.log"),
level=logging.DEBUG, filemode='w', format='%(levelname)s - %(message)s')
data = {
"question": "1+1=? (不回答会被踢出群)",
"answer": [
"2",
"3",
],
"right_answer_index": 1
}
data = io.read_settings("data", data)
waitting_for_master = False
people = {
# chat_id 324253: "maya timestring"
}
people = io.read_settings("people", people)
historical_message = {
"id_list": []
}
historical_message = io.read_settings(
"historical_message", historical_message)
def handle_useless_msg(bot, update, new_msg_id_list):
global historical_message
chat_type = update.message.chat.type
if "group" in chat_type:
logging.debug(f"new_msg_id_list: {new_msg_id_list}")
logging.debug(f"historical_message: {historical_message['id_list']}")
lock.acquire()
historical_message.update(
{"id_list": historical_message["id_list"] + new_msg_id_list}
)
io.write_settings("historical_message", historical_message)
lock.release()
logging.debug(
f"new_historical_message: {historical_message['id_list']}")
def clearn(bot, update):
chat_type = update.message.chat.type
if "group" in chat_type:
if len(historical_message["id_list"]) > 0:
lock.acquire()
for msg_id in historical_message["id_list"]:
try:
bot.delete_message(update.message.chat_id, msg_id)
except Exception as e:
print(e)
historical_message["id_list"] = []
io.write_settings("historical_message", historical_message)
lock.release()
bot.delete_message(update.message.chat_id, update.message.message_id)
def set(bot, update):
global waitting_for_master
if update.message.from_user.id != master_user_id:
Message = update.message.reply_text(
f"You are not admin!\nAdmin is @yingshaoxo ({master_user_id})\n\nYour user_id is:\n{str(update.message.from_user.id)}")
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
else:
Message1 = update.message.reply_text(
f"What's your question? \n\nExample:")
Message2 = update.message.reply_text(
f"you + me = ?\nNone\nWe\n2")
handle_useless_msg(
bot, update, [update.message.message_id, Message1.message_id, Message2.message_id])
if waitting_for_master == False:
waitting_for_master = True
else:
pass
def handle_text_msg(bot, update):
global waitting_for_master
global io
chat_type = update.message.chat.type
logging.debug(f"chat_type is {chat_type}")
if update.message.from_user.id != master_user_id:
if "group" in chat_type:
kick_them_out_if_possible(bot, update)
else:
if "group" in chat_type:
kick_them_out_if_possible(bot, update)
if waitting_for_master == True:
try:
text = update.message.text
text = text.strip()
lines = text.split("\n")
lines = [line.strip() for line in lines if line.strip() != ""]
question = lines[0]
answer = lines[1:-1]
index = int(lines[-1])
if index > len(answer):
Message = update.message.reply_text(
f"The last line should be less than or equal to {len(answer)}")
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
raise Exception
lock.acquire()
new_data = {
"question": question + " (不回答会被踢出群)",
"answer": answer,
"right_answer_index": index
}
data.update(new_data)
io.write_settings("data", data)
lock.release()
waitting_for_master = False
Message = update.message.reply_text(
f"OK, I got it!\n\nQuestion: {question}\nAnswer: {answer[index-1]}")
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
except Exception as e:
Message1 = update.message.reply_text(
f"I got this error: {e} \n Can you try again?\n\nExample:")
Message2 = update.message.reply_text(
f"you + me = ?\nNone\nWe\n2")
handle_useless_msg(
bot, update, [Message1.message_id, Message2.message_id])
def handle_all_msg(bot, update):
new_members = update.message.new_chat_members
if new_members:
lock.acquire()
for user in new_members:
people.update({
user.id: str(maya.now())
})
print(f"{user.id} came to this group")
io.write_settings("people", people)
ask(bot, update)
lock.release()
#left_member = update.message.left_chat_member
kick_them_out_if_possible(bot, update)
handle_useless_msg(bot, update, [update.message.message_id])
def kick_them_out_if_possible(bot, update):
if people != {}:
lock.acquire()
kicked_people = []
for user_id in people:
past = maya.parse(people[user_id])
now = maya.now()
time_passed = (now - past).total_seconds()  # .seconds alone wraps around after one day
logging.debug(
f"how long {user_id} hasn't sent a message: {str(time_passed)}")
if (time_passed > 60 * 3): # I will give you 3 minutes to answer my question
print(f"{user_id} has to be kicked out")
result = bot.kick_chat_member(update.message.chat_id, user_id)
if result == True:
kicked_people.append(user_id)
else:
bot.leave_chat(update.message.chat_id)
for user_id in kicked_people:
del people[user_id]
io.write_settings("people", people)
lock.release()
def ask(bot, update):
chat_type = update.message.chat.type
if "group" in chat_type:
keyboard = []
for text in data["answer"]:
keyboard.append(
[InlineKeyboardButton(text, callback_data=text)]
)
reply_markup = InlineKeyboardMarkup(keyboard)
Message = update.message.reply_text(
data["question"], reply_markup=reply_markup)
bot.pin_chat_message(update.message.chat_id,
Message.message_id, disable_notification=True)
handle_useless_msg(
bot, update, [update.message.message_id, Message.message_id])
else:
Message = update.message.reply_text(
f"You are not admin!\nAdmin is @yingshaoxo ({master_user_id})\n\nYour user_id is:\n{str(update.message.from_user.id)}")
def button(bot, update):
query = update.callback_query
right_answer = data['answer'][data["right_answer_index"]-1]
if query.data == right_answer:
user_id = query.from_user.id
if user_id in people:
lock.acquire()
del people[user_id]
io.write_settings("people", people)
lock.release()
Message = bot.send_message(
chat_id=query.message.chat_id, text="You're right.\n\nWelcome!")
time.sleep(3)
Message.delete()
else:
try:
if query.from_user.id in people.keys():
kicked_people = []
result = bot.kick_chat_member(
query.message.chat_id, query.from_user.id)
if result == True:
lock.acquire()
kicked_people.append(query.from_user.id)
for user_id in kicked_people:
del people[user_id]
io.write_settings("people", people)
lock.release()
else:
bot.leave_chat(query.message.chat_id)
except Exception as e:
print(e)
def error(bot, update, error):
"""Log Errors caused by Updates."""
print(f"{error}")
def main():
# Create the Updater and pass it your bot's token.
updater = Updater(TOKEN)
updater.dispatcher.add_handler(CommandHandler('set', set))
updater.dispatcher.add_handler(CommandHandler('ask', ask))
updater.dispatcher.add_handler(CommandHandler('clearn', clearn))
updater.dispatcher.add_handler(CallbackQueryHandler(button))
updater.dispatcher.add_handler(
MessageHandler(Filters.text, handle_text_msg))
updater.dispatcher.add_handler(
MessageHandler(Filters.all, handle_all_msg))
updater.dispatcher.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until the user presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT
updater.idle()
if __name__ == '__main__':
main()
|
# https://nachtimwald.com/2019/11/14/python-self-signed-cert-gen/
import socket
import logging
import datetime
from typing import cast
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.x509 import NameOID, ExtensionOID, DNSName, ExtensionNotFound
from feeder import settings
logger = logging.getLogger(__name__)
sans = [
x509.DNSName(socket.getfqdn()),
x509.DNSName(socket.gethostname()),
x509.DNSName(f"{socket.gethostname()}"),
x509.DNSName("localhost"),
x509.DNSName("*.localhost"),
]
if settings.domain:
sans.append(x509.DNSName(settings.domain))
def generate_self_signed_certificate():
one_day = datetime.timedelta(1, 0, 0)
private_key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
public_key = private_key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, socket.gethostname())])
)
builder = builder.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, socket.gethostname())])
)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + (one_day * 365 * 5))
builder = builder.serial_number(x509.random_serial_number())
builder = builder.public_key(public_key)
logger.debug(
"Adding SANs for %(hostname)s, *.%(hostname)s, localhost, and *.localhost",
{"hostname": socket.gethostname()},
)
builder = builder.add_extension(
x509.SubjectAlternativeName(sans),
critical=False,
)
builder = builder.add_extension(
x509.BasicConstraints(ca=False, path_length=None), critical=True
)
certificate = builder.sign(
private_key=private_key, algorithm=hashes.SHA256(), backend=default_backend()
)
return (
certificate.public_bytes(serialization.Encoding.PEM),
private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
),
)
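# Usage sketch -- both returned values are PEM-encoded bytes; the file names are
# illustrative, not mandated by the module:
# cert_pem, key_pem = generate_self_signed_certificate()
# with open("cert.pem", "wb") as f:
#     f.write(cert_pem)
# with open("key.pem", "wb") as f:
#     f.write(key_pem)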
def domain_in_subjects(certificate_path: str, domain: str) -> bool:
with open(certificate_path, "r", encoding="utf-8") as pem_file:
pem_data = pem_file.read().encode("utf-8")
cert = x509.load_pem_x509_certificate(pem_data, default_backend())
try:
extension = cert.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_ALTERNATIVE_NAME
)
ext_value = cast(x509.SubjectAlternativeName, extension.value)
alt_names = ext_value.get_values_for_type(DNSName)
except ExtensionNotFound:
logger.warning(
"Failed to load SAN extension, cannot read certificate SANs!"
)
return False
# Check if domain is in list or parent domain with wildcard.
# Example: domain is pet.domain.com and list has *.domain.com
parent_wildcard = f"*.{".".join(domain.split(".")[1:])}"
return domain in alt_names or parent_wildcard in alt_names
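# e.g. domain_in_subjects("cert.pem", "pet.domain.com") returns True when the
# certificate's SAN list contains either 'pet.domain.com' or '*.domain.com'.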
| # https://nachtimwald.com/2019/11/14/python-self-signed-cert-gen/
import socket
import logging
import datetime
from typing import cast
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.x509 import NameOID, ExtensionOID, DNSName, ExtensionNotFound
from feeder import settings
logger = logging.getLogger(__name__)
sans = [
x509.DNSName(socket.getfqdn()),
x509.DNSName(socket.gethostname()),
x509.DNSName(f"{socket.gethostname()}"),
x509.DNSName("localhost"),
x509.DNSName("*.localhost"),
]
if settings.domain:
sans.append(x509.DNSName(settings.domain))
def generate_self_signed_certificate():
one_day = datetime.timedelta(1, 0, 0)
private_key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
public_key = private_key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, socket.gethostname())])
)
builder = builder.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, socket.gethostname())])
)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + (one_day * 365 * 5))
builder = builder.serial_number(x509.random_serial_number())
builder = builder.public_key(public_key)
logger.debug(
"Adding SANs for %(hostname)s, *.%(hostname)s, localhost, and *.localhost",
{"hostname": socket.gethostname()},
)
builder = builder.add_extension(
x509.SubjectAlternativeName(sans),
critical=False,
)
builder = builder.add_extension(
x509.BasicConstraints(ca=False, path_length=None), critical=True
)
certificate = builder.sign(
private_key=private_key, algorithm=hashes.SHA256(), backend=default_backend()
)
return (
certificate.public_bytes(serialization.Encoding.PEM),
private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
),
)
def domain_in_subjects(certificate_path: str, domain: str) -> bool:
with open(certificate_path, "r", encoding="utf-8") as pem_file:
pem_data = pem_file.read().encode("utf-8")
cert = x509.load_pem_x509_certificate(pem_data, default_backend())
try:
extension = cert.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_ALTERNATIVE_NAME
)
ext_value = cast(x509.SubjectAlternativeName, extension.value)
alt_names = ext_value.get_values_for_type(DNSName)
except ExtensionNotFound:
logger.warning(
"Failed to load SAN extension, cannot read certificate SANs!"
)
return False
# Check if domain is in list or parent domain with wildcard.
# Example: domain is pet.domain.com and list has *.domain.com
parent_wildcard = f"*.{'.'.join(domain.split('.')[1:])}"
return domain in alt_names or parent_wildcard in alt_names
|
import datetime
from typing import Optional
from BinanceWatch.storage.DataBase import DataBase, SQLConditionEnum
from BinanceWatch.storage import tables
from BinanceWatch.utils.time_utils import datetime_to_millistamp
class BinanceDataBase(DataBase):
"""
Handles the recording of the binance account in a local database
"""
def __init__(self, name: str = 'binance_db'):
super().__init__(name)
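# Minimal usage sketch -- the values mirror the docstring examples below and are
# illustrative only:
# db = BinanceDataBase()
# db.add_universal_transfer(1206491332, 'MAIN_MARGIN', 1589121841000, 'BNB', 10.594112)
# rows = db.get_universal_transfers(asset='BNB')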
def add_universal_transfer(self, transfer_id: int, transfer_type: str, transfer_time: int, asset: str,
amount: float, auto_commit: bool = True):
"""
add a universal transfer to the database
:param transfer_id: id of the transfer
:type transfer_id: int
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: str
:param transfer_time: millistamp of the operation
:type transfer_time: int
:param asset: asset that got transferred
:type asset: str
:param amount: amount transferred
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
row = (transfer_id, transfer_type, transfer_time, asset, amount)
self.add_row(table, row, auto_commit=auto_commit)
def get_universal_transfers(self, transfer_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return universal transfers stored in the database. Transfer type, Asset type and time filters can be used
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: Optional[str]
:param asset: fetch only transfers of this asset
:type asset: Optional[str]
:param start_time: fetch only transfers after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only transfers before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1206491332, # transfer id
'MAIN_MARGIN', # transfer type
1589121841000, # time
'BNB', # asset
10.594112), # amount
]
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = []
if transfer_type is not None:
conditions_list.append((table.trfType,
SQLConditionEnum.equal,
transfer_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_universal_transfer_time(self, transfer_type: str) -> int:
"""
return the latest time when a universal transfer was made
If None, return the millistamp corresponding to 2017/01/01
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: str
:return: millistamp
:rtype: int
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = [(table.trfType,
SQLConditionEnum.equal,
transfer_type)]
selection = f"MAX({table.trfTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
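# For reference, datetime_to_millistamp converts a timezone-aware datetime to a
# millisecond epoch; a plausible sketch (the real helper lives in
# BinanceWatch.utils.time_utils and may differ):
# def datetime_to_millistamp(dt: datetime.datetime) -> int:
#     return int(dt.timestamp() * 1000)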
def add_margin_interest(self, margin_type: str, interest_time: int, asset: str, interest: float,
interest_type: str, auto_commit: bool = True):
"""
add a margin interest to the database
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:param interest_time: millistamp of the operation
:type interest_time: int
:param asset: asset on which the interest accrued
:type asset: str
:param interest: amount of interest accrued
:type interest: float
:param interest_type: one of (PERIODIC, ON_BORROW, PERIODIC_CONVERTED, ON_BORROW_CONVERTED)
:type interest_type: str
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (interest_time, asset, interest, interest_type)
self.add_row(table, row, auto_commit=auto_commit)
def get_margin_interests(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return margin interests stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:param asset: fetch only interests in this asset
:type asset: Optional[str]
:param start_time: fetch only interests after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only interests before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1559415215400, # time
'BNB', # asset
0.51561, # interest
'PERIODIC_CONVERTED'), # interest type
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_margin_interest_time(self, margin_type: str, asset: Optional[str] = None):
"""
return the latest time when a margin interest was accrued on a defined asset or on all assets
If None, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset charged as interest
:type asset: Optional[str]
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_repay(self, margin_type: str, tx_id: int, repay_time: int, asset: str, principal: float,
interest: float, auto_commit: bool = True):
"""
add a repay to the database
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:param tx_id: binance id for the transaction (uniqueness?)
:type tx_id: int
:param repay_time: millistamp of the operation
:type repay_time: int
:param asset: asset that got repaid
:type asset: str
:param principal: principal amount repaid for the loan
:type principal: float
:param interest: amount of interest repaid for the loan
:type interest: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, repay_time, asset, principal, interest)
self.add_row(table, row, auto_commit=auto_commit)
def get_repays(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return repays stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:param asset: fetch only repays of this asset
:type asset: Optional[str]
:param start_time: fetch only repays after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only repays before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8289451654, # transaction id
1559415215400, # time
'USDT', # asset
145.5491462, # principal
0.51561), # interest
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_repay_time(self, asset: str, margin_type: str) -> int:
"""
return the latest time when a repay was made on a defined asset
If None, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset repaid
:type asset: str
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.repayTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_loan(self, margin_type: str, tx_id: int, loan_time: int, asset: str, principal: float,
auto_commit: bool = True):
"""
add a loan to the database
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:param tx_id: binance id for the transaction (uniqueness?)
:type tx_id: int
:param loan_time: millistamp of the operation
:type loan_time: int
:param asset: asset that got loaned
:type asset: str
:param principal: amount of loaned asset
:type principal: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, loan_time, asset, principal)
self.add_row(table, row, auto_commit=auto_commit)
def get_loans(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return loans stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:param asset: fetch only loans of this asset
:type asset: Optional[str]
:param start_time: fetch only loans after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only loans before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8289451654, # transaction id
1559415215400, # time
'USDT', # asset
145.5491462), # amount
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_loan_time(self, asset: str, margin_type: str) -> int:
"""
return the latest time when a loan was made on a defined asset
If None, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset loaned
:type asset: str
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.loanTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_redemption(self, redemption_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add a lending redemption to the database
:param redemption_time: millistamp of the operation
:type redemption_time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset lent
:type asset: str
:param amount: amount of asset redeemed
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (redemption_time, lending_type, asset, amount)
self.add_row(tables.LENDING_REDEMPTION_TABLE, row, auto_commit=auto_commit)
def get_lending_redemptions(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending redemptions stored in the database. Asset type and time filters can be used
:param lending_type: fetch only redemptions from this lending type
:type lending_type: Optional[str]
:param asset: fetch only redemptions from this asset
:type asset: Optional[str]
:param start_time: fetch only redemptions after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only redemptions before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1612841562000, # time
'DAILY', # lending type
'LTC', # asset
1.89151684), # amount
]
"""
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_redemption_time(self, lending_type: Optional[str] = None) -> int:
"""
return the latest time when a lending redemption was made.
If None, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
:type lending_type: str
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.redemptionTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_purchase(self, purchase_id: int, purchase_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add a lending purchase to the database
:param purchase_id: id of the purchase
:type purchase_id: int
:param purchase_time: millistamp of the operation
:type purchase_time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset lent
:type asset: str
:param amount: amount of asset lent
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (purchase_id, purchase_time, lending_type, asset, amount)
self.add_row(tables.LENDING_PURCHASE_TABLE, row, auto_commit=auto_commit)
def get_lending_purchases(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending purchases stored in the database. Asset type and time filters can be used
:param lending_type: fetch only purchases from this lending type
:type lending_type: Optional[str]
:param asset: fetch only purchases from this asset
:type asset: Optional[str]
:param start_time: fetch only purchases after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only purchases before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(58516828, # purchase id
1612841562000, # time
'DAILY', # lending type
'LTC', # asset
1.89151684), # amount
]
"""
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_purchase_time(self, lending_type: Optional[str] = None) -> int:
"""
return the latest time when a lending purchase was made.
If None, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
:type lending_type: str
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.purchaseTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_interest(self, time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add a lending interest to the database
:param time: millistamp of the operation
:type time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset that was received
:type asset: str
:param amount: amount of asset received
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (time, lending_type, asset, amount)
self.add_row(tables.LENDING_INTEREST_TABLE, row, auto_commit=auto_commit)
def get_lending_interests(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending interests stored in the database. Asset type and time filters can be used
:param lending_type: fetch only interests from this lending type
:type lending_type: Optional[str]
:param asset: fetch only interests from this asset
:type asset: Optional[str]
:param start_time: fetch only interests after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only interests before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1619846515000, # time
'DAILY', # lending type
'DOT', # asset
0.00490156) # amount
]
"""
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_interest_time(self, lending_type: Optional[str] = None) -> int:
"""
return the latest time when an interest was received.
If None, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
:type lending_type: str
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_dust(self, tran_id: str, time: int, asset: str, asset_amount: float, bnb_amount: float, bnb_fee: float,
auto_commit: bool = True):
"""
add dust operation to the database
:param tran_id: id of the transaction (non unique)
:type tran_id: str
:param time: millistamp of the operation
:type time: int
:param asset: asset that got converted to BNB
:type asset: str
:param asset_amount: amount of asset that got converted
:type asset_amount: float
:param bnb_amount: amount received from the conversion
:type bnb_amount: float
:param bnb_fee: fee amount in BNB
:type bnb_fee: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (tran_id, time, asset, asset_amount, bnb_amount, bnb_fee)
self.add_row(tables.SPOT_DUST_TABLE, row, auto_commit=auto_commit)
def get_spot_dusts(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return dusts stored in the database. Asset type and time filters can be used
:param asset: fetch only dusts from this asset
:type asset: Optional[str]
:param start_time: fetch only dusts after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only dusts before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(82156485284, # transaction id
1605489113400, # time
'TRX', # asset
102.78415879, # asset amount
0.09084498, # bnb amount
0.00171514), # bnb fee
]
"""
conditions_list = []
table = tables.SPOT_DUST_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def add_dividend(self, div_id: int, div_time: int, asset: str, amount: float, auto_commit: bool = True):
"""
add a dividend to the database
:param div_id: dividend id
:type div_id: int
:param div_time: millistamp of dividend reception
:type div_time: int
:param asset: name of the dividend unit
:type asset: str
:param amount: amount of asset distributed
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (div_id, div_time, asset, amount)
self.add_row(tables.SPOT_DIVIDEND_TABLE, row, auto_commit=auto_commit)
def get_spot_dividends(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return dividends stored in the database. Asset type and time filters can be used
:param asset: fetch only dividends of this asset
:type asset: Optional[str]
:param start_time: fetch only dividends after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only dividends before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8945138941, # dividend id
1594513589000, # time
'TRX', # asset
0.18745654), # amount
]
"""
conditions_list = []
table = tables.SPOT_DIVIDEND_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_dividend_time(self) -> int:
"""
fetch the latest time a dividend has been distributed on the spot account. If None is found,
return the millistamp corresponding to 2017/1/1
:return: last dividend millistamp
:rtype: int
"""
table = tables.SPOT_DIVIDEND_TABLE
selection = f"MAX({table.divTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,
auto_commit: bool = True):
"""
add a withdraw to the database
:param withdraw_id: binance id of the withdraw
:type withdraw_id: str
:param tx_id: transaction id
:type tx_id: str
:param apply_time: millistamp when the withdraw was requested
:type apply_time: int
:param asset: name of the token
:type asset: str
:param amount: amount of token withdrawn
:type amount: float
:param fee: amount of the asset paid for the withdraw
:type fee: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (withdraw_id, tx_id, apply_time, asset, amount, fee)
self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)
def get_spot_withdraws(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return withdraws stored in the database. Asset type and time filters can be used
:param asset: fetch only withdraws of this asset
:type asset: Optional[str]
:param start_time: fetch only withdraws after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only withdraws before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
('84984dcqq5z11gyjfa', # withdraw id
'aazd8949vredqs56dz', # transaction id
1599138389000, # withdraw time
'XTZ', # asset
57.0194, # amount
0.5), # fee
]
"""
conditions_list = []
table = tables.SPOT_WITHDRAW_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_withdraw_time(self) -> int:
"""
fetch the latest time a withdraw has been made on the spot account. If None is found, return the millistamp
corresponding to 2017/1/1
:return: last withdraw millistamp
:rtype: int
"""
table = tables.SPOT_WITHDRAW_TABLE
selection = f"MAX({table.applyTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_deposit(self, tx_id: str, insert_time: int, amount: float, asset: str, auto_commit=True):
"""
add a deposit to the database
:param tx_id: transaction id
:type tx_id: str
:param insert_time: millistamp when the deposit arrived on binance
:type insert_time: int
:param amount: amount of token deposited
:type amount: float
:param asset: name of the token
:type asset: str
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (tx_id, insert_time, asset, amount)
self.add_row(tables.SPOT_DEPOSIT_TABLE, row, auto_commit)
def get_spot_deposits(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return deposits stored in the database. Asset type and time filters can be used
:param asset: fetch only deposits of this asset
:type asset: Optional[str]
:param start_time: fetch only deposits after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only deposits before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
('azdf5e6a1d5z', # transaction id
1589479004000, # deposit time
'LTC', # asset
14.25), # amount
]
"""
conditions_list = []
table = tables.SPOT_DEPOSIT_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_deposit_time(self) -> int:
"""
fetch the latest time a deposit has been made on the spot account. If None is found, return the millistamp
corresponding to 2017/1/1
:return: last deposit millistamp
:rtype: int
"""
table = tables.SPOT_DEPOSIT_TABLE
selection = f"MAX({table.insertTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_trade(self, trade_type: str, trade_id: int, trade_time: int, asset: str, ref_asset: str, qty: float,
price: float, fee: float, fee_asset: str, is_buyer: bool, auto_commit=True):
"""
add a trade to the database
:param trade_type: type of trade executed
:type trade_type: string, must be one of {'spot', 'cross_margin'}
:param trade_id: id of the trade (binance id, unique per trading pair)
:type trade_id: int
:param trade_time: millistamp of the trade
:type trade_time: int
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
:param qty: quantity of asset exchanged
:type qty: float
:param price: price of the asset regarding the ref_asset
:type price: float
:param fee: amount kept by the exchange
:type fee: float
:param fee_asset: token unit for the fee
:type fee_asset: str
:param is_buyer: if the trade is a buy or a sell
:type is_buyer: bool
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (trade_id, trade_time, asset, ref_asset, qty, price, fee, fee_asset, int(is_buyer))
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
self.add_row(table, row, auto_commit)
def get_trades(self, trade_type: str, start_time: Optional[int] = None, end_time: Optional[int] = None,
asset: Optional[str] = None, ref_asset: Optional[str] = None):
"""
return trades stored in the database. asset type, ref_asset type and time filters can be used
:param trade_type: type of trade executed
:type trade_type: string, must be one of ('spot', 'cross_margin')
:param start_time: fetch only trades after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only trades before this millistamp
:type end_time: Optional[int]
:param asset: fetch only trades with this asset
:type asset: Optional[str]
:param ref_asset: fetch only trades with this ref_asset
:type ref_asset: Optional[str]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(384518832, # trade_id
1582892988052, # trade time
'BTC', # asset
'USDT', # ref asset
0.0015, # asset quantity
9011.2, # asset price to ref asset
0.01425, # fee
'USDT', # fee asset
0), # is_buyer
]
"""
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
conditions_list = []
if start_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.lower,
end_time))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if ref_asset is not None:
conditions_list.append((table.refAsset,
SQLConditionEnum.equal,
ref_asset))
return self.get_conditions_rows(table, conditions_list=conditions_list, order_list=[table.tdTime])
def get_max_trade_id(self, asset: str, ref_asset: str, trade_type: str) -> int:
"""
return the latest trade id for a trading pair. If none is found, return -1
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
:param trade_type: type of trade executed
:type trade_type: string, must be one of {'spot', 'cross_margin'}
:return: latest trade id
:rtype: int
"""
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of {"spot", "cross_margin"} but {trade_type} was received")
selection = f"MAX({table.tradeId})"
conditions_list = [
(table.asset,
SQLConditionEnum.equal,
asset),
(table.refAsset,
SQLConditionEnum.equal,
ref_asset)
]
result = self.get_conditions_rows(table, selection=selection, conditions_list=conditions_list)
try:
result = result[0][0]
except IndexError:
return -1
if result is None:
return -1
return result
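# Incremental-sync sketch -- 'client.get_my_trades' stands in for whatever
# fetcher supplies new trades (python-binance shown here as an assumption):
# last_id = db.get_max_trade_id('BTC', 'USDT', 'spot')
# for t in client.get_my_trades(symbol='BTCUSDT', fromId=last_id + 1):
#     db.add_trade('spot', t['id'], t['time'], 'BTC', 'USDT', float(t['qty']),
#                  float(t['price']), float(t['commission']), t['commissionAsset'],
#                  t['isBuyer'])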
| import datetime
from typing import Optional
from BinanceWatch.storage.DataBase import DataBase, SQLConditionEnum
from BinanceWatch.storage import tables
from BinanceWatch.utils.time_utils import datetime_to_millistamp
class BinanceDataBase(DataBase):
"""
Handles the recording of the binance account in a local database
"""
def __init__(self, name: str = 'binance_db'):
super().__init__(name)
def add_universal_transfer(self, transfer_id: int, transfer_type: str, transfer_time: int, asset: str,
amount: float, auto_commit: bool = True):
"""
add a universal transfer to the database
:param transfer_id: id of the transfer
:type transfer_id: int
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: str
:param transfer_time: millistamp of the operation
:type transfer_time: int
:param asset: asset that got transferred
:type asset: str
:param amount: amount transferred
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
row = (transfer_id, transfer_type, transfer_time, asset, amount)
self.add_row(table, row, auto_commit=auto_commit)
def get_universal_transfers(self, transfer_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
        return universal transfers stored in the database. Transfer type, asset and time filters can be used
        :param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
        :type transfer_type: Optional[str]
        :param asset: fetch only transfers of this asset
        :type asset: Optional[str]
        :param start_time: fetch only transfers after this millistamp
        :type start_time: Optional[int]
        :param end_time: fetch only transfers before this millistamp
        :type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1206491332, # transfer id
'MAIN_MARGIN', # transfer type
1589121841000, # time
'BNB', # asset
10.594112), # amount
]
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = []
if transfer_type is not None:
conditions_list.append((table.trfType,
SQLConditionEnum.equal,
transfer_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_universal_transfer_time(self, transfer_type: str) -> int:
"""
return the latest time when a universal transfer was made
        If none is found, return the millistamp corresponding to 2017/01/01
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: str
:return: millistamp
:rtype: int
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = [(table.trfType,
SQLConditionEnum.equal,
transfer_type)]
selection = f"MAX({table.trfTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_margin_interest(self, margin_type: str, interest_time: int, asset: str, interest: float,
interest_type: str, auto_commit: bool = True):
"""
        add a margin interest to the database
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:param interest_time: millistamp of the operation
:type interest_time: int
        :param asset: asset on which the interest accrued
:type asset: str
:param interest: amount of interest accrued
:type interest: float
:param interest_type: one of (PERIODIC, ON_BORROW, PERIODIC_CONVERTED, ON_BORROW_CONVERTED)
:type interest_type: str
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (interest_time, asset, interest, interest_type)
self.add_row(table, row, auto_commit=auto_commit)
def get_margin_interests(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return margin interests stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:param asset: fetch only interests in this asset
:type asset: Optional[str]
:param start_time: fetch only interests after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only interests before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
            (1559415215400, # time
'BNB', # asset
0.51561, # interest
'PERIODIC_CONVERTED'), # interest type
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_margin_interest_time(self, margin_type: str, asset: Optional[str] = None):
"""
        return the latest time when a margin interest was accrued on a defined asset or on all assets
        If none is found, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset charged as interest
:type asset: Optional[str]
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_repay(self, margin_type: str, tx_id: int, repay_time: int, asset: str, principal: float,
interest: float, auto_commit: bool = True):
"""
add a repay to the database
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:param tx_id: binance id for the transaction (uniqueness?)
:type tx_id: int
        :param repay_time: millistamp of the operation
:type repay_time: int
:param asset: asset that got repaid
:type asset: str
:param principal: principal amount repaid for the loan
:type principal: float
:param interest: amount of interest repaid for the loan
        :type interest: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, repay_time, asset, principal, interest)
self.add_row(table, row, auto_commit=auto_commit)
def get_repays(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return repays stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:param asset: fetch only repays of this asset
:type asset: Optional[str]
:param start_time: fetch only repays after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only repays before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8289451654, # transaction id
1559415215400, # time
'USDT', # asset
145.5491462, # principal
0.51561), # interest
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_repay_time(self, asset: str, margin_type: str) -> int:
"""
return the latest time when a repay was made on a defined asset
        If none is found, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset repaid
:type asset: str
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.repayTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_loan(self, margin_type: str, tx_id: int, loan_time: int, asset: str, principal: float,
auto_commit: bool = True):
"""
add a loan to the database
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:param tx_id: binance id for the transaction (uniqueness?)
:type tx_id: int
        :param loan_time: millistamp of the operation
:type loan_time: int
:param asset: asset that got loaned
:type asset: str
:param principal: amount of loaned asset
:type principal: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, loan_time, asset, principal)
self.add_row(table, row, auto_commit=auto_commit)
def get_loans(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return loans stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:param asset: fetch only loans of this asset
:type asset: Optional[str]
:param start_time: fetch only loans after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only loans before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8289451654, # transaction id
1559415215400, # time
'USDT', # asset
145.5491462), # amount
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_loan_time(self, asset: str, margin_type: str) -> int:
"""
        return the latest time when a loan was made on a defined asset
        If none is found, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset loaned
:type asset: str
:param margin_type: either 'cross' or 'isolated'
        :type margin_type: str
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.loanTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_redemption(self, redemption_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add a lending redemption to the database
        :param redemption_time: millistamp of the operation
:type redemption_time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset lent
:type asset: str
:param amount: amount of asset redeemed
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (redemption_time, lending_type, asset, amount)
self.add_row(tables.LENDING_REDEMPTION_TABLE, row, auto_commit=auto_commit)
def get_lending_redemptions(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending redemptions stored in the database. Asset type and time filters can be used
:param lending_type: fetch only redemptions from this lending type
:type lending_type: Optional[str]
:param asset: fetch only redemptions from this asset
:type asset: Optional[str]
:param start_time: fetch only redemptions after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only redemptions before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
            (1612841562000, # time
'DAILY', # lending type
'LTC', # asset
1.89151684), # amount
]
"""
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_redemption_time(self, lending_type: Optional[str] = None) -> int:
"""
        return the latest time when a lending redemption was made.
        If none is found, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
        :type lending_type: Optional[str]
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.redemptionTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_purchase(self, purchase_id: int, purchase_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add a lending purchase to the database
:param purchase_id: id of the purchase
:type purchase_id: int
        :param purchase_time: millistamp of the operation
:type purchase_time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset lent
:type asset: str
:param amount: amount of asset lent
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (purchase_id, purchase_time, lending_type, asset, amount)
self.add_row(tables.LENDING_PURCHASE_TABLE, row, auto_commit=auto_commit)
def get_lending_purchases(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending purchases stored in the database. Asset type and time filters can be used
:param lending_type: fetch only purchases from this lending type
:type lending_type: Optional[str]
:param asset: fetch only purchases from this asset
:type asset: Optional[str]
:param start_time: fetch only purchases after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only purchases before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(58516828, # purchase id
1612841562000, # time
'DAILY', # lending type
'LTC', # asset
1.89151684), # amount
]
"""
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_purchase_time(self, lending_type: Optional[str] = None) -> int:
"""
        return the latest time when a lending purchase was made.
        If none is found, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
        :type lending_type: Optional[str]
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.purchaseTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_interest(self, time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
        add a lending interest to the database
        :param time: millistamp of the operation
:type time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset that was received
:type asset: str
:param amount: amount of asset received
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (time, lending_type, asset, amount)
self.add_row(tables.LENDING_INTEREST_TABLE, row, auto_commit=auto_commit)
def get_lending_interests(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending interests stored in the database. Asset type and time filters can be used
:param lending_type: fetch only interests from this lending type
:type lending_type: Optional[str]
:param asset: fetch only interests from this asset
:type asset: Optional[str]
:param start_time: fetch only interests after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only interests before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1619846515000, # time
'DAILY', # lending type
'DOT', # asset
0.00490156) # amount
]
"""
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_interest_time(self, lending_type: Optional[str] = None) -> int:
"""
return the latest time when an interest was received.
        If none is found, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
        :type lending_type: Optional[str]
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_dust(self, tran_id: str, time: int, asset: str, asset_amount: float, bnb_amount: float, bnb_fee: float,
auto_commit: bool = True):
"""
add dust operation to the database
:param tran_id: id of the transaction (non unique)
:type tran_id: str
        :param time: millistamp of the operation
:type time: int
:param asset: asset that got converted to BNB
:type asset: str
:param asset_amount: amount of asset that got converted
:type asset_amount: float
:param bnb_amount: amount received from the conversion
:type bnb_amount: float
:param bnb_fee: fee amount in BNB
:type bnb_fee: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (tran_id, time, asset, asset_amount, bnb_amount, bnb_fee)
self.add_row(tables.SPOT_DUST_TABLE, row, auto_commit=auto_commit)
def get_spot_dusts(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return dusts stored in the database. Asset type and time filters can be used
:param asset: fetch only dusts from this asset
:type asset: Optional[str]
:param start_time: fetch only dusts after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only dusts before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(82156485284, # transaction id
1605489113400, # time
'TRX', # asset
102.78415879, # asset amount
0.09084498, # bnb amount
0.00171514), # bnb fee
]
"""
conditions_list = []
table = tables.SPOT_DUST_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def add_dividend(self, div_id: int, div_time: int, asset: str, amount: float, auto_commit: bool = True):
"""
add a dividend to the database
:param div_id: dividend id
:type div_id: int
:param div_time: millistamp of dividend reception
:type div_time: int
:param asset: name of the dividend unit
:type asset: str
:param amount: amount of asset distributed
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (div_id, div_time, asset, amount)
self.add_row(tables.SPOT_DIVIDEND_TABLE, row, auto_commit=auto_commit)
def get_spot_dividends(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return dividends stored in the database. Asset type and time filters can be used
:param asset: fetch only dividends of this asset
:type asset: Optional[str]
:param start_time: fetch only dividends after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only dividends before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8945138941, # dividend id
1594513589000, # time
'TRX', # asset
0.18745654), # amount
]
"""
conditions_list = []
table = tables.SPOT_DIVIDEND_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_dividend_time(self) -> int:
"""
        fetch the latest time a dividend has been distributed on the spot account. If none is found,
        return the millistamp corresponding to 2017/1/1
        :return: last dividend millistamp
        :rtype: int
"""
table = tables.SPOT_DIVIDEND_TABLE
selection = f"MAX({table.divTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,
auto_commit: bool = True):
"""
add a withdraw to the database
        :param withdraw_id: binance id of the withdraw
:type withdraw_id: str
:param tx_id: transaction id
:type tx_id: str
:param apply_time: millistamp when the withdraw was requested
:type apply_time: int
:param asset: name of the token
:type asset: str
:param amount: amount of token withdrawn
:type amount: float
:param fee: amount of the asset paid for the withdraw
:type fee: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (withdraw_id, tx_id, apply_time, asset, amount, fee)
self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)
def get_spot_withdraws(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return withdraws stored in the database. Asset type and time filters can be used
:param asset: fetch only withdraws of this asset
:type asset: Optional[str]
:param start_time: fetch only withdraws after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only withdraws before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
('84984dcqq5z11gyjfa', # withdraw id
'aazd8949vredqs56dz', # transaction id
1599138389000, # withdraw time
'XTZ', # asset
57.0194, # amount
0.5), # fee
]
"""
conditions_list = []
table = tables.SPOT_WITHDRAW_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_withdraw_time(self) -> int:
"""
        fetch the latest time a withdraw has been made on the spot account. If none is found, return the millistamp
        corresponding to 2017/1/1
        :return: last withdraw millistamp
        :rtype: int
"""
table = tables.SPOT_WITHDRAW_TABLE
selection = f"MAX({table.applyTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_deposit(self, tx_id: str, insert_time: int, amount: float, asset: str, auto_commit=True):
"""
add a deposit to the database
:param tx_id: transaction id
:type tx_id: str
:param insert_time: millistamp when the deposit arrived on binance
:type insert_time: int
:param amount: amount of token deposited
:type amount: float
:param asset: name of the token
:type asset: str
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (tx_id, insert_time, asset, amount)
self.add_row(tables.SPOT_DEPOSIT_TABLE, row, auto_commit)
def get_spot_deposits(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return deposits stored in the database. Asset type and time filters can be used
:param asset: fetch only deposits of this asset
:type asset: Optional[str]
:param start_time: fetch only deposits after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only deposits before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
('azdf5e6a1d5z', # transaction id
1589479004000, # deposit time
'LTC', # asset
14.25), # amount
]
"""
conditions_list = []
table = tables.SPOT_DEPOSIT_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_deposit_time(self) -> int:
"""
        fetch the latest time a deposit has been made on the spot account. If none is found, return the millistamp
corresponding to 2017/1/1
:return: last deposit millistamp
:rtype: int
"""
table = tables.SPOT_DEPOSIT_TABLE
selection = f"MAX({table.insertTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_trade(self, trade_type: str, trade_id: int, trade_time: int, asset: str, ref_asset: str, qty: float,
price: float, fee: float, fee_asset: str, is_buyer: bool, auto_commit=True):
"""
add a trade to the database
        :param trade_type: type of trade executed
:type trade_type: string, must be one of {'spot', 'cross_margin'}
:param trade_id: id of the trade (binance id, unique per trading pair)
:type trade_id: int
:param trade_time: millistamp of the trade
:type trade_time: int
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
:param qty: quantity of asset exchanged
:type qty: float
:param price: price of the asset regarding the ref_asset
:type price: float
:param fee: amount kept by the exchange
:type fee: float
:param fee_asset: token unit for the fee
:type fee_asset: str
:param is_buyer: if the trade is a buy or a sell
:type is_buyer: bool
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (trade_id, trade_time, asset, ref_asset, qty, price, fee, fee_asset, int(is_buyer))
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
self.add_row(table, row, auto_commit)
def get_trades(self, trade_type: str, start_time: Optional[int] = None, end_time: Optional[int] = None,
asset: Optional[str] = None, ref_asset: Optional[str] = None):
"""
        return trades stored in the database. Asset, ref_asset and time filters can be used
        :param trade_type: type of trade executed
:type trade_type: string, must be one of ('spot', 'cross_margin')
:param start_time: fetch only trades after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only trades before this millistamp
:type end_time: Optional[int]
:param asset: fetch only trades with this asset
:type asset: Optional[str]
:param ref_asset: fetch only trades with this ref_asset
:type ref_asset: Optional[str]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(384518832, # trade_id
1582892988052, # trade time
'BTC', # asset
'USDT', # ref asset
0.0015, # asset quantity
9011.2, # asset price to ref asset
0.01425, # fee
'USDT', # fee asset
0), # is_buyer
]
"""
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
conditions_list = []
if start_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.lower,
end_time))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if ref_asset is not None:
conditions_list.append((table.refAsset,
SQLConditionEnum.equal,
ref_asset))
return self.get_conditions_rows(table, conditions_list=conditions_list, order_list=[table.tdTime])
def get_max_trade_id(self, asset: str, ref_asset: str, trade_type: str) -> int:
"""
return the latest trade id for a trading pair. If none is found, return -1
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
        :param trade_type: type of trade executed
:type trade_type: string, must be one of {'spot', 'cross_margin'}
:return: latest trade id
:rtype: int
"""
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of {'spot', 'cross_margin'} but {trade_type} was received")
selection = f"MAX({table.tradeId})"
conditions_list = [
(table.asset,
SQLConditionEnum.equal,
asset),
(table.refAsset,
SQLConditionEnum.equal,
ref_asset)
]
result = self.get_conditions_rows(table, selection=selection, conditions_list=conditions_list)
try:
result = result[0][0]
except IndexError:
return -1
if result is None:
return -1
return result
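# Hedged note on the recurring pattern above: every get_last_*_time() method
# selects MAX(<time column>) and falls back to the 2017-01-01 millistamp when
# the table is empty (IndexError) or the SQL aggregate is NULL (None). A
# minimal stand-alone sketch of that fallback (the helper name is hypothetical),
# assuming only the datetime_to_millistamp() helper imported at the top of
# this module:
def _max_or_default(rows):
    """Return rows[0][0] unless the result set is empty or the value is NULL."""
    default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
    try:
        value = rows[0][0]
    except IndexError:
        return default
    return default if value is None else value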
|
from BiblioAlly import catalog as cat, domain, translator as bibtex
class ScopusTranslator(bibtex.Translator):
def _document_from_proto_document(self, proto_document):
bibtex.Translator._translate_kind(proto_document)
kind = proto_document['type']
fields = proto_document['field']
title = self._unbroken(self._uncurlied(fields['title']))
if 'abstract' in fields:
abstract = self._unbroken(self._uncurlied(fields['abstract']))
else:
abstract = ''
year = int(fields['year'])
if 'author' in fields:
author_field = self._unbroken(self._uncurlied(fields['author']))
else:
author_field = ''
authors = self._authors_from_field(author_field)
if 'affiliation' in fields:
affiliations = self._affiliations_from_field(self._all_uncurly(fields['affiliation']))
else:
affiliations = None
affiliations = self._expand_affiliations(affiliations, authors)
keywords = []
if 'author_keywords' in fields:
all_keywords = self._all_uncurly(fields['author_keywords']).split(';')
keyword_names = set()
for keyword_name in all_keywords:
name = keyword_name.strip().capitalize()
if name not in keyword_names:
keyword_names.add(name)
keyword_names = list(keyword_names)
for keyword_name in keyword_names:
keywords.append(domain.Keyword(name=keyword_name))
document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
document.generator = "Scopus"
if 'document_type' in fields:
document.document_type = self._uncurlied(fields['document_type'])
for name in ['doi', 'pages', 'url', 'volume', 'number', 'language', 'journal']:
if name in fields:
value = self._uncurlied(fields[name])
if len(value) > 0:
setattr(document, name, value)
        return document
def _proto_document_from_document(self, document: domain.Document):
kind = document.kind
if kind == 'proceedings':
kind = 'conference'
fields = dict()
fields['external_key'] = document.external_key
doc_authors = document.authors
doc_authors.sort(key=lambda doc_author: doc_author.first)
doc_authors.reverse()
all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
else doc_author.author.short_name) for doc_author in doc_authors]
fields['author'] = self._curly(all_authors, separator=' and ')
fields['title'] = self._curly(document.title)
affiliations = []
for doc_author in doc_authors:
institution = doc_author.institution
if institution is not None:
affiliation = ', '.join([institution.name, institution.country])
affiliations.append(affiliation)
if len(affiliations) > 0:
fields['affiliation'] = self._curly(affiliations, '; ')
fields['year'] = self._curly(str(document.year))
if document.international_number is not None:
fields['issn'] = self._curly(str(document.international_number))
if document.publisher is not None:
fields['publisher'] = self._curly(str(document.publisher))
if document.address is not None:
fields['address'] = self._curly(str(document.address))
if document.doi is not None:
fields['doi'] = self._curly(str(document.doi))
        if document.url is not None:
            fields['url'] = self._curly(str(document.url))
fields['abstract'] = self._curly(document.abstract)
if document.journal is not None:
fields['journal'] = self._curly(str(document.journal))
if document.pages is not None:
fields['pages'] = self._curly(str(document.pages))
if document.volume is not None:
fields['volume'] = self._curly(str(document.volume))
if document.number is not None:
fields['number'] = self._curly(str(document.number))
if document.language is not None:
fields['language'] = self._curly(str(document.language))
keywords = [keyword.name for keyword in document.keywords]
fields['author_keywords'] = self._curly(keywords, '; ')
if len(document.references) > 0:
fields['references'] = self._curly('; '.join(document.references))
if document.document_type is not None:
fields['document_type'] = self._curly(document.document_type)
fields['source'] = self._curly(document.generator)
proto_document = {
'type': kind,
'fields': fields
}
return proto_document
def _as_bibtex(self, proto_document):
kind = proto_document['type'].upper()
fields = proto_document['fields']
external_key = fields['external_key']
del fields['external_key']
key_value = []
for key, value in fields.items():
key_value.append(f'{key}={value}')
bibtex = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}\n'
return bibtex
Scopus = "Scopus"
cat.Catalog.translators[Scopus] = ScopusTranslator
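# Hedged usage sketch (illustration only): _as_bibtex() consumes the dict shape
# produced by _proto_document_from_document() -- a 'type' plus a 'fields'
# mapping whose values are already curly-wrapped and include 'external_key'.
# All values below are hypothetical, and the Translator base class is assumed
# to need no constructor arguments.
if __name__ == '__main__':
    translator = ScopusTranslator()
    proto = {
        'type': 'article',
        'fields': {
            'external_key': 'Smith2020',
            'title': '{An example title}',
            'year': '{2020}',
        },
    }
    # Prints a BibTeX entry of the form "@ARTICLE{Smith2020, ...}"
    print(translator._as_bibtex(proto))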
| from BiblioAlly import catalog as cat, domain, translator as bibtex
class ScopusTranslator(bibtex.Translator):
def _document_from_proto_document(self, proto_document):
bibtex.Translator._translate_kind(proto_document)
kind = proto_document['type']
fields = proto_document['field']
title = self._unbroken(self._uncurlied(fields['title']))
if 'abstract' in fields:
abstract = self._unbroken(self._uncurlied(fields['abstract']))
else:
abstract = ''
year = int(fields['year'])
if 'author' in fields:
author_field = self._unbroken(self._uncurlied(fields['author']))
else:
author_field = ''
authors = self._authors_from_field(author_field)
if 'affiliation' in fields:
affiliations = self._affiliations_from_field(self._all_uncurly(fields['affiliation']))
else:
affiliations = None
affiliations = self._expand_affiliations(affiliations, authors)
keywords = []
if 'author_keywords' in fields:
all_keywords = self._all_uncurly(fields['author_keywords']).split(';')
keyword_names = set()
for keyword_name in all_keywords:
name = keyword_name.strip().capitalize()
if name not in keyword_names:
keyword_names.add(name)
keyword_names = list(keyword_names)
for keyword_name in keyword_names:
keywords.append(domain.Keyword(name=keyword_name))
document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
document.generator = "Scopus"
if 'document_type' in fields:
document.document_type = self._uncurlied(fields['document_type'])
for name in ['doi', 'pages', 'url', 'volume', 'number', 'language', 'journal']:
if name in fields:
value = self._uncurlied(fields[name])
if len(value) > 0:
setattr(document, name, value)
        return document
def _proto_document_from_document(self, document: domain.Document):
kind = document.kind
if kind == 'proceedings':
kind = 'conference'
fields = dict()
fields['external_key'] = document.external_key
doc_authors = document.authors
doc_authors.sort(key=lambda doc_author: doc_author.first)
doc_authors.reverse()
all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
else doc_author.author.short_name) for doc_author in doc_authors]
fields['author'] = self._curly(all_authors, separator=' and ')
fields['title'] = self._curly(document.title)
affiliations = []
for doc_author in doc_authors:
institution = doc_author.institution
if institution is not None:
affiliation = ', '.join([institution.name, institution.country])
affiliations.append(affiliation)
if len(affiliations) > 0:
fields['affiliation'] = self._curly(affiliations, '; ')
fields['year'] = self._curly(str(document.year))
if document.international_number is not None:
fields['issn'] = self._curly(str(document.international_number))
if document.publisher is not None:
fields['publisher'] = self._curly(str(document.publisher))
if document.address is not None:
fields['address'] = self._curly(str(document.address))
if document.doi is not None:
fields['doi'] = self._curly(str(document.doi))
        if document.url is not None:
            fields['url'] = self._curly(str(document.url))
fields['abstract'] = self._curly(document.abstract)
if document.journal is not None:
fields['journal'] = self._curly(str(document.journal))
if document.pages is not None:
fields['pages'] = self._curly(str(document.pages))
if document.volume is not None:
fields['volume'] = self._curly(str(document.volume))
if document.number is not None:
fields['number'] = self._curly(str(document.number))
if document.language is not None:
fields['language'] = self._curly(str(document.language))
keywords = [keyword.name for keyword in document.keywords]
fields['author_keywords'] = self._curly(keywords, '; ')
if len(document.references) > 0:
fields['references'] = self._curly('; '.join(document.references))
if document.document_type is not None:
fields['document_type'] = self._curly(document.document_type)
fields['source'] = self._curly(document.generator)
proto_document = {
'type': kind,
'fields': fields
}
return proto_document
def _as_bibtex(self, proto_document):
kind = proto_document['type'].upper()
fields = proto_document['fields']
external_key = fields['external_key']
del fields['external_key']
key_value = []
for key, value in fields.items():
key_value.append(f'{key}={value}')
bibtex = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}\n'
return bibtex
Scopus = "Scopus"
cat.Catalog.translators[Scopus] = ScopusTranslator
|
import csv
import datetime
from crawler.models import Medicine, Generic, DosageForm, DrugClass, Indication, Manufacturer
from django.core.management import BaseCommand
from django.utils.autoreload import logger
class Command(BaseCommand): # see https://gist.github.com/2724472
help = "Mapping the generics with medicines"
def add_arguments(self, parser):
parser.add_argument('model_name',
type=str,
help='model name for the csv export, e.g. medicine, generic, dosage_form, drug_class, '
'indication, manufacturer')
parser.add_argument('outfile',
nargs='?',
type=str,
help='Save path, like </path/to/outfile.csv> or "/data/medicine.csv"')
def handle(self, *args, **options):
model_name = options['model_name']
export_file = f"{options["outfile"]}.csv" if options['outfile'] else '{}.csv'.format(model_name)
logger.info("Exporting... %s" % model_name)
model_dict = {'medicine': Medicine, 'generic': Generic, 'dosage_form': DosageForm, 'drug_class': DrugClass,
'indication': Indication, 'manufacturer': Manufacturer}
model_class = model_dict[model_name]
with open('%s' % export_file, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
fields = [field for field in model_class._meta.get_fields() if not field.many_to_many \
and not field.one_to_many]
# Write a first row with header information
writer.writerow([field.verbose_name for field in fields])
# Write data rows
for obj in model_class.objects.all():
data_row = []
for field in fields:
value = getattr(obj, field.name)
if isinstance(value, datetime.datetime):
value = value.strftime('%d/%m/%Y')
data_row.append(value)
writer.writerow(data_row)
logger.info(f.name, "exported")
| import csv
import datetime
from crawler.models import Medicine, Generic, DosageForm, DrugClass, Indication, Manufacturer
from django.core.management import BaseCommand
from django.utils.autoreload import logger
class Command(BaseCommand): # see https://gist.github.com/2724472
help = "Mapping the generics with medicines"
def add_arguments(self, parser):
parser.add_argument('model_name',
type=str,
help='model name for the csv export, e.g. medicine, generic, dosage_form, drug_class, '
'indication, manufacturer')
parser.add_argument('outfile',
nargs='?',
type=str,
help='Save path, like </path/to/outfile.csv> or "/data/medicine.csv"')
def handle(self, *args, **options):
model_name = options['model_name']
export_file = f"{options['outfile']}.csv" if options['outfile'] else '{}.csv'.format(model_name)
logger.info("Exporting... %s" % model_name)
model_dict = {'medicine': Medicine, 'generic': Generic, 'dosage_form': DosageForm, 'drug_class': DrugClass,
'indication': Indication, 'manufacturer': Manufacturer}
model_class = model_dict[model_name]
with open('%s' % export_file, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
fields = [field for field in model_class._meta.get_fields() if not field.many_to_many \
and not field.one_to_many]
# Write a first row with header information
writer.writerow([field.verbose_name for field in fields])
# Write data rows
for obj in model_class.objects.all():
data_row = []
for field in fields:
value = getattr(obj, field.name)
if isinstance(value, datetime.datetime):
value = value.strftime('%d/%m/%Y')
data_row.append(value)
writer.writerow(data_row)
logger.info(f.name, "exported")
|
import os
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from .seq2seq_model import Seq2SeqModel
from .vectorizer import TokenVectorizer
class WhatWhyPredictor():
"""
Predicts a sequence of text which answers the question 'why?' given some input 'what'.
The prediction model is trained by vectorizing lists of token sequences and passing
the results to a Seq2SeqModel and calling its fit() method. After training, the
predict() methods can be used to predict 'why' text from 'what' text.
The Seq2SeqModel, vectorizers, and vectorized data sets can be specified manually
or saved and loaded from files using the save/load methods.
"""
def __init__( self, word2vec_model=None,
max_num_tokens_per_sample=10,
vocab_index=None ):
"""
Creates a WhatWhyPredictor instance using the specified parameters.
If no parameters are specified, then they should be loaded from
a file using the load() methods.
Params:
word2vec_model : [Optional] A pre-trained gensim Word2Vec model.
max_num_tokens_per_sample : [Optional] Maximum number of tokens to include in a sample sequence.
Any extra tokens will be truncated.
vocab_index : [Optional] A pre-built VocabularyIndex of the data set. This can
help reduce the size of one-hot encoded words in the
vocabulary, compared to that of pre-trained word2vec models.
"""
self.word2vec_model = word2vec_model
self.max_num_tokens_per_sample = max_num_tokens_per_sample
self.vocab_index = vocab_index
self.what_token_vectorizer = None
self.why_token_vectorizer = None
self.X_train = None
self.X_test = None
self.Y_train = None
self.Y_test = None
self.indeces_train = None
self.indeces_test = None
# If word2vec_model is None, then the decoder should be loaded from a pickle file instead.
if word2vec_model is not None:
self.decoder = TokenVectorizer( word2vec_model=word2vec_model,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
@staticmethod
def load_from_pickle_file(dir_name):
"""
Loads a WhatWhyPredictor instance from a pickle
file 'whatwhy_predictor.p' in the specified directory.
"""
with open( os.path.join(dir_name, "whatwhy_predictor.p") , "rb" ) as in_file:
return pickle.load(in_file)
def fit_tokens( self, lists_of_what_tokens=None,
lists_of_why_tokens=None,
epochs=1,
batch_size=None ):
"""Trains a Seq2SeqModel on lists that contain sequences (lists) of 'what' and 'why' tokens."""
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
lists_of_why_tokens=lists_of_why_tokens )
self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test)
self.seq2seq_model.fit(epochs=epochs, batch_size=batch_size)
def predict(self, list_of_what_tokens):
"""
Predicts a string of 'why' text from an input sequence of 'what' tokens.
The following instance fields should be initialized or loaded before calling this method.
word2vec_model
max_num_tokens_per_sample
seq2seq_model
decoder
"""
lists_of_what_tokens = [list_of_what_tokens]
return self.predict_all(lists_of_what_tokens)[0]
def predict_all(self, lists_of_what_tokens):
"""
Predicts strings of 'why' text from input sequences of 'what' tokens.
The following instance fields should be initialized or loaded before calling this method.
word2vec_model
max_num_tokens_per_sample
seq2seq_model
decoder
"""
embedded_what_tokens = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_what_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index ).get_embeddings()
one_hot_predictions = self.seq2seq_model.predict_all(embedded_what_tokens)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
return predictions
def compare_predictions_to_actual(self, input_tokens, predictions, actual_vals):
for i, prediction in enumerate(predictions):
print(f"'What' Input : { " ".join(input_tokens[i]) }")
print(f"'Why' Actual : { actual_vals[i] }")
print(f"'Why' Predicted : { prediction }")
print("---------------------------------------------")
def compare_test_set_to_predictions(self, max_num_examples=None):
if max_num_examples is None:
max_num_examples = self.X_test.shape[0]
X_test = self.X_test[:max_num_examples,:,:]
Y_test = self.Y_test[:max_num_examples,:,:]
indeces_test = self.indeces_test[:max_num_examples]
input_tokens_test = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_test ]
actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_test)
one_hot_predictions = self.seq2seq_model.predict_all(X_test)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
self.compare_predictions_to_actual(input_tokens_test, predictions, actual_vals)
def compare_train_set_to_predictions(self, max_num_examples=None):
if max_num_examples is None:
max_num_examples = self.X_train.shape[0]
X_train = self.X_train[:max_num_examples,:,:]
Y_train = self.Y_train[:max_num_examples,:,:]
indeces_train = self.indeces_train[:max_num_examples]
input_tokens_train = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_train ]
actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_train)
one_hot_predictions = self.seq2seq_model.predict_all(X_train)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
self.compare_predictions_to_actual(input_tokens_train, predictions, actual_vals)
def get_what_and_why_token_vectorizers(self, lists_of_what_tokens=None, lists_of_why_tokens=None):
"""
Returns TokenVectorizers for the lists of what/why token sequences.
The instance fields 'word2vec_model', 'max_num_tokens_per_sample', and
optionally 'vocab_index' should be initialized before calling this method.
"""
if self.what_token_vectorizer is None or self.why_token_vectorizer is None:
self.set_what_and_why_token_vectorizers_from_lists(lists_of_what_tokens, lists_of_why_tokens)
return self.what_token_vectorizer, self.why_token_vectorizer
def set_what_and_why_token_vectorizers_from_lists(self, lists_of_what_tokens, lists_of_why_tokens):
"""
Initializes TokenVectorizers for the lists of what/why token sequences.
The instance fields 'word2vec_model', 'max_num_tokens_per_sample', and
optionally 'vocab_index' should be initialized before calling this method.
"""
self.what_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_what_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
self.why_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_why_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
def get_train_and_test_data( self, lists_of_what_tokens=None,
lists_of_why_tokens=None,
test_size=0.20,
random_state=42 ):
"""
Splits a data set of what/why tokens into test and train sets
if they have not already been separated.
"""
if self.X_train is None or self.X_test is None or self.Y_train is None or self.Y_test is None:
what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
embedded_what_tokens = what_token_vectorizer.get_embeddings()
one_hot_why_tokens = why_token_vectorizer.get_one_hot_encodings()
indeces = np.arange( len(what_token_vectorizer.tokens_lists) )
self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test = train_test_split( embedded_what_tokens,
one_hot_why_tokens,
indeces,
test_size=test_size,
random_state=random_state )
return self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test
def save_to_pickle_file(self, dir_name):
"""
Saves the WhatWhyPredictor instance to a pickle
file 'whatwhy_predictor.p' in the specified directory.
"""
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
with open( os.path.join(dir_name, "whatwhy_predictor.p") , "wb" ) as out_file:
pickle.dump(self, out_file, protocol=4)
def save_seq2seq_model(self, model_dir):
"""
Saves the underlying tensorflow.keras model's weights to
a file 'model.h5' in the specified directory.
"""
self.seq2seq_model.save_model(model_dir)
def load_seq2seq_model_from_saved_tf_model(self, model_dir):
"""
Intializes the Seq2SeqModel by loading weights from
a file 'model.h5' in the specified directory.
"""
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data()
self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test).load_from_saved_tf_model(model_dir)
def save_train_and_test_data_to_pickle_files(self, dir_name, lists_of_what_tokens=None, lists_of_why_tokens=None):
"""
Splits a data set of what/why tokens into test and train sets
if they have not already been separated and saves them in pickle files.
"""
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
lists_of_why_tokens=lists_of_why_tokens )
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
with open( os.path.join(dir_name, "X_train.p") , "wb" ) as out_file:
pickle.dump(X_train, out_file, protocol=4)
with open( os.path.join(dir_name, "X_test.p") , "wb" ) as out_file:
pickle.dump(X_test, out_file, protocol=4)
with open( os.path.join(dir_name, "Y_train.p") , "wb" ) as out_file:
pickle.dump(Y_train, out_file, protocol=4)
with open( os.path.join(dir_name, "Y_test.p") , "wb" ) as out_file:
pickle.dump(Y_test, out_file, protocol=4)
with open( os.path.join(dir_name, "indeces_train.p") , "wb" ) as out_file:
pickle.dump(indeces_train, out_file, protocol=4)
with open( os.path.join(dir_name, "indeces_test.p") , "wb" ) as out_file:
pickle.dump(indeces_test, out_file, protocol=4)
def load_train_and_test_data_from_pickle_files(self, dir_name):
with open( os.path.join(dir_name, "X_train.p") , "rb" ) as in_file:
self.X_train = pickle.load(in_file)
with open( os.path.join(dir_name, "X_test.p") , "rb" ) as in_file:
self.X_test = pickle.load(in_file)
with open( os.path.join(dir_name, "Y_train.p") , "rb" ) as in_file:
self.Y_train = pickle.load(in_file)
with open( os.path.join(dir_name, "Y_test.p") , "rb" ) as in_file:
self.Y_test = pickle.load(in_file)
with open( os.path.join(dir_name, "indeces_train.p") , "rb" ) as in_file:
self.indeces_train = pickle.load(in_file)
with open( os.path.join(dir_name, "indeces_test.p") , "rb" ) as in_file:
self.indeces_test = pickle.load(in_file)
def save_token_vectorizers_to_pickle_files(self, target_dir, lists_of_what_tokens=None, lists_of_why_tokens=None):
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
what_token_vectorizer.get_embeddings()
why_token_vectorizer.get_one_hot_encodings()
what_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "what_tokenizer.p") )
why_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "why_tokenizer.p") )
self.decoder.save_to_pickle_file( os.path.join(target_dir, "decoder.p") )
def load_token_vectorizers_from_pickle_files(self, dir_name):
self.what_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "what_tokenizer.p") )
self.why_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "why_tokenizer.p") )
self.decoder = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "decoder.p") )
self.word2vec_model = self.decoder.word2vec_model
self.vocab_index = self.decoder.vocab_index
self.max_num_tokens_per_sample = self.decoder.num_tokens_per_sample
| import os
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from .seq2seq_model import Seq2SeqModel
from .vectorizer import TokenVectorizer
class WhatWhyPredictor():
"""
Predicts a sequence of text which answers the question 'why?' given some input 'what'.
The prediction model is trained by vectorizing lists of token sequences, passing
the results to a Seq2SeqModel, and calling its fit() method. After training, the
predict() methods can be used to generate 'why' text from 'what' text.
The Seq2SeqModel, vectorizers, and vectorized data sets can be specified manually
or saved and loaded from files using the save/load methods.
"""
def __init__( self, word2vec_model=None,
max_num_tokens_per_sample=10,
vocab_index=None ):
"""
Creates a WhatWhyPredictor instance using the specified parameters.
If no parameters are specified, they should instead be loaded
from files using the load methods.
Params:
word2vec_model : [Optional] A pre-trained gensim Word2Vec model.
max_num_tokens_per_sample : [Optional] Maximum number of tokens to include in a sample sequence.
Any extra tokens will be truncated.
vocab_index : [Optional] A pre-built VocabularyIndex of the data set. This can
shrink the one-hot word encodings relative to using the
full vocabulary of a pre-trained word2vec model.
"""
self.word2vec_model = word2vec_model
self.max_num_tokens_per_sample = max_num_tokens_per_sample
self.vocab_index = vocab_index
self.what_token_vectorizer = None
self.why_token_vectorizer = None
self.X_train = None
self.X_test = None
self.Y_train = None
self.Y_test = None
self.indeces_train = None
self.indeces_test = None
# If word2vec_model is None, then the decoder should be loaded from a pickle file instead.
if word2vec_model is not None:
self.decoder = TokenVectorizer( word2vec_model=word2vec_model,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
@staticmethod
def load_from_pickle_file(dir_name):
"""
Loads a WhatWhyPredictor instance from a pickle
file 'whatwhy_predictor.p' in the specified directory.
"""
with open( os.path.join(dir_name, "whatwhy_predictor.p") , "rb" ) as in_file:
return pickle.load(in_file)
def fit_tokens( self, lists_of_what_tokens=None,
lists_of_why_tokens=None,
epochs=1,
batch_size=None ):
"""Trains a Seq2SeqModel on lists that contain sequences (lists) of 'what' and 'why' tokens."""
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
lists_of_why_tokens=lists_of_why_tokens )
self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test)
self.seq2seq_model.fit(epochs=epochs, batch_size=batch_size)
def predict(self, list_of_what_tokens):
"""
Predicts a string of 'why' text from an input sequence of 'what' tokens.
The following instance fields should be initialized or loaded before calling this method.
word2vec_model
max_num_tokens_per_sample
seq2seq_model
decoder
"""
lists_of_what_tokens = [list_of_what_tokens]
return self.predict_all(lists_of_what_tokens)[0]
def predict_all(self, lists_of_what_tokens):
"""
Predicts strings of 'why' text from input sequences of 'what' tokens.
The following instance fields should be initialized or loaded before calling this method.
word2vec_model
max_num_tokens_per_sample
seq2seq_model
decoder
"""
embedded_what_tokens = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_what_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index ).get_embeddings()
one_hot_predictions = self.seq2seq_model.predict_all(embedded_what_tokens)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
return predictions
def compare_predictions_to_actual(self, input_tokens, predictions, actual_vals):
for i, prediction in enumerate(predictions):
print(f"'What' Input : { ' '.join(input_tokens[i]) }")
print(f"'Why' Actual : { actual_vals[i] }")
print(f"'Why' Predicted : { prediction }")
print("---------------------------------------------")
def compare_test_set_to_predictions(self, max_num_examples=None):
if max_num_examples is None:
max_num_examples = self.X_test.shape[0]
X_test = self.X_test[:max_num_examples,:,:]
Y_test = self.Y_test[:max_num_examples,:,:]
indeces_test = self.indeces_test[:max_num_examples]
input_tokens_test = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_test ]
actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_test)
one_hot_predictions = self.seq2seq_model.predict_all(X_test)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
self.compare_predictions_to_actual(input_tokens_test, predictions, actual_vals)
def compare_train_set_to_predictions(self, max_num_examples=None):
if max_num_examples is None:
max_num_examples = self.X_train.shape[0]
X_train = self.X_train[:max_num_examples,:,:]
Y_train = self.Y_train[:max_num_examples,:,:]
indeces_train = self.indeces_train[:max_num_examples]
input_tokens_train = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_train ]
actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_train)
one_hot_predictions = self.seq2seq_model.predict_all(X_train)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
self.compare_predictions_to_actual(input_tokens_train, predictions, actual_vals)
def get_what_and_why_token_vectorizers(self, lists_of_what_tokens=None, lists_of_why_tokens=None):
"""
Returns TokenVectorizers for the lists of what/why token sequences.
The instance fields 'word2vec_model', 'max_num_tokens_per_sample', and
optionally 'vocab_index' should be initialized before calling this method.
"""
if self.what_token_vectorizer is None or self.why_token_vectorizer is None:
self.set_what_and_why_token_vectorizers_from_lists(lists_of_what_tokens, lists_of_why_tokens)
return self.what_token_vectorizer, self.why_token_vectorizer
def set_what_and_why_token_vectorizers_from_lists(self, lists_of_what_tokens, lists_of_why_tokens):
"""
Initializes TokenVectorizers for the lists of what/why token sequences.
The instance fields 'word2vec_model', 'max_num_tokens_per_sample', and
optionally 'vocab_index' should be initialized before calling this method.
"""
self.what_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_what_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
self.why_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_why_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
def get_train_and_test_data( self, lists_of_what_tokens=None,
lists_of_why_tokens=None,
test_size=0.20,
random_state=42 ):
"""
Splits a data set of what/why tokens into test and train sets
if they have not already been separated.
"""
if self.X_train is None or self.X_test is None or self.Y_train is None or self.Y_test is None:
what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
embedded_what_tokens = what_token_vectorizer.get_embeddings()
one_hot_why_tokens = why_token_vectorizer.get_one_hot_encodings()
indeces = np.arange( len(what_token_vectorizer.tokens_lists) )
self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test = train_test_split( embedded_what_tokens,
one_hot_why_tokens,
indeces,
test_size=test_size,
random_state=random_state )
return self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test
def save_to_pickle_file(self, dir_name):
"""
Saves the WhatWhyPredictor instance to a pickle
file 'whatwhy_predictor.p' in the specified directory.
"""
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
with open( os.path.join(dir_name, "whatwhy_predictor.p") , "wb" ) as out_file:
pickle.dump(self, out_file, protocol=4)
def save_seq2seq_model(self, model_dir):
"""
Saves the underlying tensorflow.keras model's weights to
a file 'model.h5' in the specified directory.
"""
self.seq2seq_model.save_model(model_dir)
def load_seq2seq_model_from_saved_tf_model(self, model_dir):
"""
Initializes the Seq2SeqModel by loading weights from
a file 'model.h5' in the specified directory.
"""
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data()
self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test).load_from_saved_tf_model(model_dir)
def save_train_and_test_data_to_pickle_files(self, dir_name, lists_of_what_tokens=None, lists_of_why_tokens=None):
"""
Splits a data set of what/why tokens into test and train sets
if they have not already been separated, and saves them in pickle files.
"""
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
lists_of_why_tokens=lists_of_why_tokens )
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
with open( os.path.join(dir_name, "X_train.p") , "wb" ) as out_file:
pickle.dump(X_train, out_file, protocol=4)
with open( os.path.join(dir_name, "X_test.p") , "wb" ) as out_file:
pickle.dump(X_test, out_file, protocol=4)
with open( os.path.join(dir_name, "Y_train.p") , "wb" ) as out_file:
pickle.dump(Y_train, out_file, protocol=4)
with open( os.path.join(dir_name, "Y_test.p") , "wb" ) as out_file:
pickle.dump(Y_test, out_file, protocol=4)
with open( os.path.join(dir_name, "indeces_train.p") , "wb" ) as out_file:
pickle.dump(indeces_train, out_file, protocol=4)
with open( os.path.join(dir_name, "indeces_test.p") , "wb" ) as out_file:
pickle.dump(indeces_test, out_file, protocol=4)
def load_train_and_test_data_from_pickle_files(self, dir_name):
with open( os.path.join(dir_name, "X_train.p") , "rb" ) as in_file:
self.X_train = pickle.load(in_file)
with open( os.path.join(dir_name, "X_test.p") , "rb" ) as in_file:
self.X_test = pickle.load(in_file)
with open( os.path.join(dir_name, "Y_train.p") , "rb" ) as in_file:
self.Y_train = pickle.load(in_file)
with open( os.path.join(dir_name, "Y_test.p") , "rb" ) as in_file:
self.Y_test = pickle.load(in_file)
with open( os.path.join(dir_name, "indeces_train.p") , "rb" ) as in_file:
self.indeces_train = pickle.load(in_file)
with open( os.path.join(dir_name, "indeces_test.p") , "rb" ) as in_file:
self.indeces_test = pickle.load(in_file)
def save_token_vectorizers_to_pickle_files(self, target_dir, lists_of_what_tokens=None, lists_of_why_tokens=None):
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
what_token_vectorizer.get_embeddings()
why_token_vectorizer.get_one_hot_encodings()
what_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "what_tokenizer.p") )
why_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "why_tokenizer.p") )
self.decoder.save_to_pickle_file( os.path.join(target_dir, "decoder.p") )
def load_token_vectorizers_from_pickle_files(self, dir_name):
self.what_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "what_tokenizer.p") )
self.why_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "why_tokenizer.p") )
self.decoder = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "decoder.p") )
self.word2vec_model = self.decoder.word2vec_model
self.vocab_index = self.decoder.vocab_index
self.max_num_tokens_per_sample = self.decoder.num_tokens_per_sample
|
import multiprocessing
import os
accesslog = '-'
bind = f'{os.getenv("GUNICORN_HOST", "0.0.0.0")}:{os.getenv("GUNICORN_PORT", "8000")}' # noqa
capture_output = True
syslog = os.getenv('LOG_SYSLOG', 'false').lower() in ['true', '1', 'yes', 'on']
threads = int(os.getenv('GUNICORN_THREADS', multiprocessing.cpu_count() * 2 + 1)) # noqa
workers = int(os.getenv('GUNICORN_WORKERS', 1))
worker_class = 'gthread'
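# Usage sketch (assumes this file is saved as gunicorn.conf.py and that a WSGI
# entry point app:application exists -- both names are hypothetical). With the
# 'gthread' worker class, each worker serves requests from a pool of `threads`
# threads:
#   GUNICORN_PORT=9000 GUNICORN_WORKERS=2 gunicorn -c gunicorn.conf.py app:application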
| import multiprocessing
import os
accesslog = '-'
bind = f'{os.getenv("GUNICORN_HOST", "0.0.0.0")}:{os.getenv("GUNICORN_PORT", "8000")}' # noqa
capture_output = True
syslog = os.getenv('LOG_SYSLOG', 'false').lower() in ['true', '1', 'yes', 'on']
threads = int(os.getenv('GUNICORN_THREADS', multiprocessing.cpu_count() * 2 + 1)) # noqa
workers = int(os.getenv('GUNICORN_WORKERS', 1))
worker_class = 'gthread'
|
"""
MIT License
Copyright (c) 2021-present Obi-Wan3
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import typing
from datetime import datetime, timedelta, timezone
import discord
from redbot.core import commands, Config, bank
from redbot.core.utils.chat_formatting import humanize_list
class UploadStreaks(commands.Cog):
"""
Streaks & Points for Uploads
A leaderboard that awards points and streaks for uploading attachments to specific channels once per time interval.
"""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=14000605, force_registration=True)
default_guild = {
"challenges": {}
}
self.config.register_guild(**default_guild)
@commands.Cog.listener("on_message")
async def _message_listener(self, message: discord.Message):
# Ignore these messages
if (
not message.guild or # Message not in a guild
await self.bot.cog_disabled_in_guild(self, message.guild) or # Cog disabled in guild
message.author.bot or # Message author is a bot
not message.attachments # There are no attachments in this message
):
return
async with self.config.guild(message.guild).challenges() as settings:
for challenge in settings.values():
if (
not challenge['active'] or # Challenge not active
message.channel.id not in challenge['channels'] or # Message not in challenge channel
(challenge['role'] and challenge['role'] not in [r.id for r in message.author.roles]) or # Author does not have role
datetime.utcfromtimestamp(challenge['interval'][1]) > datetime.utcnow() # Challenge not started
):
continue
orig = challenge['users'].get(str(message.author.id))
if orig:
interval_before = (datetime.utcnow() - timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
interval_start = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
interval_end = (datetime.utcnow() + timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
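# Worked example: with interval=(1, ..., 0), i.e. 1-day intervals rolling over
# at 00:00 UTC, and "now" on 2021-06-15, these are 2021-06-14 00:00
# (interval_before), 2021-06-15 00:00 (interval_start), and 2021-06-16 00:00
# (interval_end); the user's last-entry timestamp is compared against them below.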
# Last entry was also in this interval
if interval_start <= challenge['users'][str(message.author.id)][2] <= interval_end:
challenge['users'][str(message.author.id)] = (orig[0], orig[1], message.created_at.timestamp())
continue
# Streak continued
if interval_before <= challenge['users'][str(message.author.id)][2] <= interval_start:
challenge['users'][str(message.author.id)] = (orig[0]+1, orig[1]+1, message.created_at.timestamp())
# Streak restarted
else:
challenge['users'][str(message.author.id)] = (orig[0]+1, 1, message.created_at.timestamp())
else:
challenge['users'][str(message.author.id)] = (1, 1, message.created_at.timestamp())
if challenge['credits'] > 0:
await bank.deposit_credits(message.author, challenge['credits'])
@commands.guild_only()
@commands.group(name="uploadstreaks")
async def _upload_streaks(self, ctx: commands.Context):
"""UploadStreaks Settings"""
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="list")
async def _list(self, ctx: commands.Context):
"""List the current UploadStreaks challenges."""
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title=f"UploadStreaks Challenges", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
embed.description = ""
for count, name in enumerate(settings.keys()):
embed.description += f"**{count+1}.** {name}"
return await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="leaderboard", aliases=['ldb'])
async def _leaderboard(self, ctx: commands.Context, challenge: str, num=10):
"""See the current UploadStreaks leaderboard for a challenge."""
settings = await self.config.guild(ctx.guild).challenges()
if challenge not in settings.keys():
return await ctx.send("No challenge was found with that name.")
embed = discord.Embed(title=f"UploadStreaks Challenge `{challenge}`", color=await ctx.embed_color())
if not settings[challenge]['users']:
embed.description = "No users have participated in this challenge yet."
else:
embed.description = "```Streak Points User\n"
ldb = sorted(settings[challenge]['users'].items(), key=lambda x: x[1][1], reverse=True)
for i in range(min(num, len(ldb))):
member = ctx.guild.get_member(int(ldb[i][0]))
if member:
name = member.display_name
else:
try:
name = (await self.bot.fetch_user(int(ldb[i][0]))).name
except discord.HTTPException:
continue
embed.description += f"{(str(ldb[i][1][1])+settings[challenge]["streak"]).center(6)} {str(ldb[i][1][0]).center(6)} {name}\n"
embed.description += "```"
return await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="user")
async def _user(self, ctx: commands.Context, user: discord.Member):
"""See a user's UploadStreaks points."""
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title=f"UploadStreaks Info for {user.display_name}", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
for name, challenge in settings.items():
u = challenge['users'].get(str(user.id))
if u:
embed.add_field(name=f"Challenge `{name}`", inline=False, value=f"Points: {u[0]}\nStreak: {u[1]}{challenge["streak"]}")
if not embed.fields:
embed.description = "This user has not participated in any UploadStreaks challenges."
return await ctx.send(embed=embed)
@commands.admin_or_permissions(administrator=True)
@_upload_streaks.group(name="settings")
async def _settings(self, ctx: commands.Context):
"""UploadStreaks Settings"""
@_settings.command(name="new")
async def _settings_new(self, ctx: commands.Context, challenge: str, streak_name: str, interval: int, utc_day_start: int, credits: typing.Optional[int] = 0, role: typing.Optional[discord.Role] = None, *channels: discord.TextChannel):
"""
Start a new UploadStreaks challenge. See below for parameters:
`challenge`: the name of the challenge
`streak_name`: the name of the streak (e.g. `d` for days)
`interval`: a number representing the length in days for each interval (e.g. `5`)
`utc_day_start`: a number representing the UTC hour to start the day on (e.g. `2` or `23`)
`credits`: the amount of credits to be awarded to a user on post (optional, default 0)
`role`: the role to automatically detect challenge entries from (leave empty for everyone)
`channels`: the channels to listen in for entries
"""
# Test utc_day_start
if not (0 <= utc_day_start < 24):
return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")
# Validate interval and compute the first interval start
if interval <= 0:
return await ctx.send(f"`{interval}` is not a positive integer!")
if datetime.utcnow().hour < utc_day_start:
ts = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()
else:
ts = (datetime.utcnow() + timedelta(days=1)).replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()
# Test credit amount
if credits < 0:
return await ctx.send("The amount of credits must be a positive integer!")
async with self.config.guild(ctx.guild).challenges() as challenges:
challenges[challenge] = {
"active": True,
"streak": streak_name,
"interval": (interval, ts, utc_day_start),
"credits": credits,
"role": role.id if role else None,
"channels": [c.id for c in channels],
"users": {}
}
starts_in = datetime.utcfromtimestamp(ts) - datetime.utcnow()
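# starts_in is less than one day here (ts is the next occurrence of
# utc_day_start), so .seconds -- which excludes whole days -- safely drives
# the hrs/mins figures in the message below.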
return await ctx.send(f"A new challenge `{challenge}` was successfully added! If you want to edit anything, use `{ctx.clean_prefix}uploadstreaks settings edit`. The challenge will start in {starts_in.seconds//3600} hrs {(starts_in.seconds//60)%60} mins at {datetime.utcfromtimestamp(ts)} UTC.")
@_settings.command(name="toggle")
async def _settings_toggle(self, ctx: commands.Context, challenge_name: str, true_or_false: bool):
"""
Toggle whether an UploadStreaks challenge is active.
**Warning:** this *may* break users' streaks if a challenge is toggled off for longer than the interval.
"""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["active"] = true_or_false
return await ctx.tick()
@_settings.command(name="reset")
async def _settings_reset(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
"""Reset all streaks & points of an UploadStreaks challenge."""
if not enter_true_to_confirm:
return await ctx.send("Please provide `true` as the parameter to confirm.")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["users"] = {}
return await ctx.tick()
@_settings.command(name="delete")
async def _settings_delete(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
"""Delete an UploadStreaks challenge."""
if not enter_true_to_confirm:
return await ctx.send("Please provide `true` as the parameter to confirm.")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
del challenges[challenge_name]
return await ctx.tick()
@_settings.group(name="edit")
async def _settings_edit(self, ctx: commands.Context):
"""Edit an UploadStreaks Challenge"""
@_settings_edit.command(name="streakname")
async def _settings_edit_streak_name(self, ctx: commands.Context, challenge_name: str, streak_name: str):
"""Edit the name of the streak for an UploadStreaks challenge."""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["streak"] = streak_name
return await ctx.tick()
@_settings_edit.command(name="interval")
async def _settings_edit_interval(self, ctx: commands.Context, challenge_name: str, interval: int, utc_day_start: int):
"""Edit the interval of an UploadStreaks challenge."""
# Validate interval
if interval <= 0:
return await ctx.send(f"`{interval}` is not a positive integer!")
# Test utc_day_start
if not (0 <= utc_day_start < 24):
return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["interval"] = (interval, challenges[challenge_name]["interval"][1], utc_day_start)
return await ctx.tick()
@_settings_edit.command(name="credits")
async def _settings_edit_credits(self, ctx: commands.Context, challenge_name: str, credits: int):
"""Edit the awarded credits of an UploadStreaks challenge."""
if credits < 0:
return await ctx.send("The amount of credits must be a positive integer!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["credits"] = credits
return await ctx.tick()
@_settings_edit.command(name="role")
async def _settings_edit_role(self, ctx: commands.Context, challenge_name: str, role: discord.Role = None):
"""Edit the role of an UploadStreaks challenge (leave empty for everyone)."""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["role"] = role.id if role else None
return await ctx.tick()
@_settings_edit.command(name="channels")
async def _settings_edit_channels(self, ctx: commands.Context, challenge_name: str, *channels: discord.TextChannel):
"""Edit the channels of an UploadStreaks challenge."""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["channels"] = [c.id for c in channels]
return await ctx.tick()
@_settings.group(name="set")
async def _settings_set(self, ctx: commands.Context):
"""Manually Set User Streaks & Points"""
@_settings_set.command(name="points")
async def _settings_set_points(self, ctx: commands.Context, user: discord.Member, challenge_name: str, points: int):
"""Manually set a user's points in an UploadStreaks challenge."""
if points < 1:
return await ctx.send("The points must be at least `1`!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
orig = challenges[challenge_name]['users'].get(str(user.id))
if not orig:
return await ctx.send("That user has not participated in the challenge yet!")
challenges[challenge_name]['users'][str(user.id)] = (points, orig[1], orig[2])
return await ctx.tick()
@_settings_set.command(name="streak")
async def _settings_set_streak(self, ctx: commands.Context, user: discord.Member, challenge_name: str, streak: int):
"""Manually set a user's streak in an UploadStreaks challenge."""
if streak < 1:
return await ctx.send("The streak must be at least `1`!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
orig = challenges[challenge_name]['users'].get(str(user.id))
if not orig:
return await ctx.send("That user has not participated in the challenge yet!")
challenges[challenge_name]['users'][str(user.id)] = (orig[0], streak, orig[2])
return await ctx.tick()
@commands.bot_has_permissions(embed_links=True)
@_settings.command(name="view")
async def _settings_view(self, ctx: commands.Context):
"""View the settings of UploadStreaks challenges in this server."""
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title="UploadStreaks Settings", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
for name, challenge in settings.items():
channels = []
for c in challenge['channels']:
if ch := ctx.guild.get_channel(c):
channels.append(ch.mention)
embed.add_field(
name=f"Challenge `{name}`",
inline=False,
value=f"""
**Active:** {challenge['active']}
**Streak Name:** {challenge['streak']}
**Interval:** {challenge['interval'][0]} days (started on {datetime.utcfromtimestamp(challenge['interval'][1])})
**Credits:** {challenge['credits']}
**Role:** {ctx.guild.get_role(challenge['role']).mention if challenge['role'] and ctx.guild.get_role(challenge['role']) else None }
**Channels:** {humanize_list(channels)}
"""
)
return await ctx.send(embed=embed)
| """
MIT License
Copyright (c) 2021-present Obi-Wan3
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import typing
from datetime import datetime, timedelta, timezone
import discord
from redbot.core import commands, Config, bank
from redbot.core.utils.chat_formatting import humanize_list
class UploadStreaks(commands.Cog):
"""
Streaks & Points for Uploads
A leaderboard that awards points and streaks for uploading attachments to specific channels once per time interval.
"""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=14000605, force_registration=True)
default_guild = {
"challenges": {}
}
self.config.register_guild(**default_guild)
@commands.Cog.listener("on_message")
async def _message_listener(self, message: discord.Message):
# Ignore these messages
if (
not message.guild or # Message not in a guild
await self.bot.cog_disabled_in_guild(self, message.guild) or # Cog disabled in guild
message.author.bot or # Message author is a bot
not message.attachments # There are no attachments in this message
):
return
async with self.config.guild(message.guild).challenges() as settings:
for challenge in settings.values():
if (
not challenge['active'] or # Challenge not active
message.channel.id not in challenge['channels'] or # Message not in challenge channel
(challenge['role'] and challenge['role'] not in [r.id for r in message.author.roles]) or # Author does not have role
datetime.utcfromtimestamp(challenge['interval'][1]) > datetime.utcnow() # Challenge not started
):
continue
orig = challenge['users'].get(str(message.author.id))
if orig:
interval_before = (datetime.utcnow() - timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
interval_start = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
interval_end = (datetime.utcnow() + timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
# Last entry was also in this interval
if interval_start <= challenge['users'][str(message.author.id)][2] <= interval_end:
challenge['users'][str(message.author.id)] = (orig[0], orig[1], message.created_at.timestamp())
continue
# Streak continued
if interval_before <= challenge['users'][str(message.author.id)][2] <= interval_start:
challenge['users'][str(message.author.id)] = (orig[0]+1, orig[1]+1, message.created_at.timestamp())
# Streak restarted
else:
challenge['users'][str(message.author.id)] = (orig[0]+1, 1, message.created_at.timestamp())
else:
challenge['users'][str(message.author.id)] = (1, 1, message.created_at.timestamp())
if challenge['credits'] > 0:
await bank.deposit_credits(message.author, challenge['credits'])
@commands.guild_only()
@commands.group(name="uploadstreaks")
async def _upload_streaks(self, ctx: commands.Context):
"""UploadStreaks Settings"""
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="list")
async def _list(self, ctx: commands.Context):
"""List the current UploadStreaks challenges."""
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title=f"UploadStreaks Challenges", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
embed.description = ""
for count, name in enumerate(settings.keys()):
embed.description += f"**{count+1}.** {name}"
return await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="leaderboard", aliases=['ldb'])
async def _leaderboard(self, ctx: commands.Context, challenge: str, num=10):
"""See the current UploadStreaks leaderboard for a challenge."""
settings = await self.config.guild(ctx.guild).challenges()
if challenge not in settings.keys():
return await ctx.send("No challenge was found with that name.")
embed = discord.Embed(title=f"UploadStreaks Challenge `{challenge}`", color=await ctx.embed_color())
if not settings[challenge]['users']:
embed.description = "No users have participated in this challenge yet."
else:
embed.description = "```Streak Points User\n"
ldb = sorted(settings[challenge]['users'].items(), key=lambda x: x[1][1], reverse=True)
for i in range(min(num, len(ldb))):
member = ctx.guild.get_member(int(ldb[i][0]))
if member:
name = member.display_name
else:
try:
name = (await self.bot.fetch_user(int(ldb[i][0]))).name
except discord.HTTPException:
continue
embed.description += f"{(str(ldb[i][1][1])+settings[challenge]['streak']).center(6)} {str(ldb[i][1][0]).center(6)} {name}\n"
embed.description += "```"
return await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="user")
async def _user(self, ctx: commands.Context, user: discord.Member):
"""See a user's UploadStreaks points."""
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title=f"UploadStreaks Info for {user.display_name}", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
for name, challenge in settings.items():
u = challenge['users'].get(str(user.id))
if u:
embed.add_field(name=f"Challenge `{name}`", inline=False, value=f"Points: {u[0]}\nStreak: {u[1]}{challenge['streak']}")
if not embed.fields:
embed.description = "This user has not participated in any UploadStreaks challenges."
return await ctx.send(embed=embed)
@commands.admin_or_permissions(administrator=True)
@_upload_streaks.group(name="settings")
async def _settings(self, ctx: commands.Context):
"""UploadStreaks Settings"""
@_settings.command(name="new")
async def _settings_new(self, ctx: commands.Context, challenge: str, streak_name: str, interval: int, utc_day_start: int, credits: typing.Optional[int] = 0, role: typing.Optional[discord.Role] = None, *channels: discord.TextChannel):
"""
Start a new UploadStreaks challenge. See below for parameters:
`challenge`: the name of the challenge
`streak_name`: the name of the streak (e.g. `d` for days)
`interval`: a number representing the length in days for each interval (e.g. `5`)
`utc_day_start`: a number representing the UTC hour to start the day on (e.g. `2` or `23`)
`credits`: the amount of credits to be awarded to a user on post (optional, default 0)
`role`: the role to automatically detect challenge entries from (leave empty for everyone)
`channels`: the channels to listen in for entries
"""
# Test utc_day_start
if not (0 <= utc_day_start < 24):
return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")
# Validate interval and compute the first interval start
if interval <= 0:
return await ctx.send(f"`{interval}` is not a positive integer!")
if datetime.utcnow().hour < utc_day_start:
ts = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()
else:
ts = (datetime.utcnow() + timedelta(days=1)).replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()
# Test credit amount
if credits < 0:
return await ctx.send("The amount of credits must be a positive integer!")
async with self.config.guild(ctx.guild).challenges() as challenges:
challenges[challenge] = {
"active": True,
"streak": streak_name,
"interval": (interval, ts, utc_day_start),
"credits": credits,
"role": role.id if role else None,
"channels": [c.id for c in channels],
"users": {}
}
starts_in = datetime.utcfromtimestamp(ts) - datetime.utcnow()
return await ctx.send(f"A new challenge `{challenge}` was successfully added! If you want to edit anything, use `{ctx.clean_prefix}uploadstreaks settings edit`. The challenge will start in {starts_in.seconds//3600} hrs {(starts_in.seconds//60)%60} mins at {datetime.utcfromtimestamp(ts)} UTC.")
@_settings.command(name="toggle")
async def _settings_toggle(self, ctx: commands.Context, challenge_name: str, true_or_false: bool):
"""
Toggle whether an UploadStreaks challenge is active.
**Warning:** this *may* break users' streaks if a challenge is toggled off for longer than the interval.
"""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["active"] = true_or_false
return await ctx.tick()
@_settings.command(name="reset")
async def _settings_reset(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
"""Reset all streaks & points of an UploadStreaks challenge."""
if not enter_true_to_confirm:
return await ctx.send("Please provide `true` as the parameter to confirm.")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["users"] = {}
return await ctx.tick()
@_settings.command(name="delete")
async def _settings_delete(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
"""Delete an UploadStreaks challenge."""
if not enter_true_to_confirm:
return await ctx.send("Please provide `true` as the parameter to confirm.")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
del challenges[challenge_name]
return await ctx.tick()
@_settings.group(name="edit")
async def _settings_edit(self, ctx: commands.Context):
"""Edit an UploadStreaks Challenge"""
@_settings_edit.command(name="streakname")
async def _settings_edit_streak_name(self, ctx: commands.Context, challenge_name: str, streak_name: str):
"""Edit the name of the streak for an UploadStreaks challenge."""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["streak"] = streak_name
return await ctx.tick()
@_settings_edit.command(name="interval")
async def _settings_edit_interval(self, ctx: commands.Context, challenge_name: str, interval: int, utc_day_start: int):
"""Edit the interval of an UploadStreaks challenge."""
# Validate interval
if interval <= 0:
return await ctx.send(f"`{interval}` is not a positive integer!")
# Test utc_day_start
if not (0 <= utc_day_start < 24):
return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["interval"] = (interval, challenges[challenge_name]["interval"][1], utc_day_start)
return await ctx.tick()
@_settings_edit.command(name="credits")
async def _settings_edit_credits(self, ctx: commands.Context, challenge_name: str, credits: int):
"""Edit the awarded credits of an UploadStreaks challenge."""
if credits < 0:
return await ctx.send("The amount of credits must be a positive integer!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["credits"] = credits
return await ctx.tick()
@_settings_edit.command(name="role")
async def _settings_edit_role(self, ctx: commands.Context, challenge_name: str, role: discord.Role = None):
"""Edit the role of an UploadStreaks challenge (leave empty for everyone)."""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["role"] = role.id if role else None
return await ctx.tick()
@_settings_edit.command(name="channels")
async def _settings_edit_channels(self, ctx: commands.Context, challenge_name: str, *channels: discord.TextChannel):
"""Edit the channels of an UploadStreaks challenge."""
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["channels"] = [c.id for c in channels]
return await ctx.tick()
@_settings.group(name="set")
async def _settings_set(self, ctx: commands.Context):
"""Manually Set User Streaks & Points"""
@_settings_set.command(name="points")
async def _settings_set_points(self, ctx: commands.Context, user: discord.Member, challenge_name: str, points: int):
"""Manually set a user's points in an UploadStreaks challenge."""
if points < 1:
return await ctx.send("The points must be at least `1`!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
orig = challenges[challenge_name]['users'].get(str(user.id))
if not orig:
return await ctx.send("That user has not participated in the challenge yet!")
challenges[challenge_name]['users'][str(user.id)] = (points, orig[1], orig[2])
return await ctx.tick()
@_settings_set.command(name="streak")
async def _settings_set_streak(self, ctx: commands.Context, user: discord.Member, challenge_name: str, streak: int):
"""Manually set a user's streak in an UploadStreaks challenge."""
if streak < 1:
return await ctx.send("The streak must be at least `1`!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
orig = challenges[challenge_name]['users'].get(str(user.id))
if not orig:
return await ctx.send("That user has not participated in the challenge yet!")
challenges[challenge_name]['users'][str(user.id)] = (orig[0], streak, orig[2])
return await ctx.tick()
@commands.bot_has_permissions(embed_links=True)
@_settings.command(name="view")
async def _settings_view(self, ctx: commands.Context):
"""View the settings of UploadStreaks challenges in this server."""
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title="UploadStreaks Settings", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
for name, challenge in settings.items():
channels = []
for c in challenge['channels']:
if ch := ctx.guild.get_channel(c):
channels.append(ch.mention)
embed.add_field(
name=f"Challenge `{name}`",
inline=False,
value=f"""
**Active:** {challenge['active']}
**Streak Name:** {challenge['streak']}
**Interval:** {challenge['interval'][0]} days (started on {datetime.utcfromtimestamp(challenge['interval'][1])})
**Credits:** {challenge['credits']}
**Role:** {ctx.guild.get_role(challenge['role']).mention if challenge['role'] and ctx.guild.get_role(challenge['role']) else None }
**Channels:** {humanize_list(channels)}
"""
)
return await ctx.send(embed=embed)
|