Dataset schema: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, values 52.6k–1.16M).
Populate the list of projects that belong to this experiment.

Args:
    projects_to_filter (list(Project)): List of projects we want to assign
        to this experiment. We intersect the list of projects with the list
        of supported projects to get the list of projects that belong to
        this experiment.
    group (list(str)): In addition to the project filter, we provide a way
        to filter whole groups.

def populate(projects_to_filter=None, group=None):
    if projects_to_filter is None:
        projects_to_filter = []

    import benchbuild.projects as all_projects
    all_projects.discover()

    prjs = ProjectRegistry.projects
    if projects_to_filter:
        prjs = {}
        for filter_project in set(projects_to_filter):
            try:
                prjs.update({
                    x: y
                    for x, y in ProjectRegistry.projects.items(
                        prefix=filter_project)
                })
            except KeyError:
                pass

    if group:
        groupkeys = set(group)
        prjs = {
            name: cls
            for name, cls in prjs.items() if cls.GROUP in groupkeys
        }

    return {
        x: prjs[x]
        for x in prjs
        if prjs[x].DOMAIN != "debug" or x in projects_to_filter
    }
732,700
Strip prefix from path.

Args:
    ipath: input path
    prefix: the prefix to remove, if it is found in :ipath:

Examples:
    >>> strip_path_prefix("/foo/bar", "/bar")
    '/foo/bar'
    >>> strip_path_prefix("/foo/bar", "/")
    'foo/bar'
    >>> strip_path_prefix("/foo/bar", "/foo")
    '/bar'
    >>> strip_path_prefix("/foo/bar", "None")
    '/foo/bar'

def strip_path_prefix(ipath, prefix):
    if prefix is None:
        return ipath

    return ipath[len(prefix):] if ipath.startswith(prefix) else ipath
732,711
Load a pickled obj from the filesystem.

You better know what you expect from the given pickle, because we don't
check it.

Args:
    filename (str): The filename we load the object from.

Returns:
    The object we were able to unpickle, else None.

def load(filename):
    if not os.path.exists(filename):
        LOG.error("load object - File '%s' does not exist.", filename)
        return None

    obj = None
    with open(filename, 'rb') as obj_file:
        obj = dill.load(obj_file)
    return obj
732,717
Extracts the build information from a given executable.

The build information is expected to be in JSON format, which is parsed
and returned as a dictionary. If no build information is found, an empty
dictionary is returned. This requires binutils 2.25 or newer to work.

Args:
    exe_path (str): The full path to the executable to be examined.

Returns:
    dict: A dictionary of the extracted information.

def extract_build_info(exe_path, elf_section=ELF_SECTION):
    build_info = {}
    with mkdtemp() as tempd, pushd(tempd):
        proc = subprocess.Popen(
            [
                OBJCOPY,
                DUMP_SECTION,
                "{secn}={ofile}".format(secn=elf_section,
                                        ofile=BUILDINFO_FILE),
                exe_path,
            ],
            stderr=subprocess.PIPE,
        )
        proc.wait()
        errno = proc.returncode
        stderr = proc.stderr.read()

        if errno or len(stderr):
            # just return the empty dict
            LOGGER.warning('objcopy failed with errno %s.', errno)
            if len(stderr):
                LOGGER.warning('objcopy failed with following msg:\n%s',
                               stderr)
            return build_info

        with open(BUILDINFO_FILE) as build_info_f:
            try:
                build_info = json.load(build_info_f, object_hook=byteify)
            except JSONDcdError as jsde:
                LOGGER.warning('benchmark executable build is not valid json:')
                LOGGER.warning(jsde.msg)
                LOGGER.warning('build info section content:')
                LOGGER.warning(jsde.doc)

    return build_info
732,755
Return a customizable uchroot command.

Args:
    args: List of additional arguments for uchroot (typical: mounts)

Return:
    chroot_cmd

def uchroot(*args, **kwargs):
    uchroot_cmd = with_mounts(*args, uchroot_cmd_fn=no_llvm, **kwargs)
    return uchroot_cmd["--"]
732,763
Return a customizable uchroot command.

The command will be executed inside a uchroot environment.

Args:
    args: List of additional arguments for uchroot (typical: mounts)

Return:
    chroot_cmd

def no_llvm(*args, uid=0, gid=0, **kwargs):
    uchroot_cmd = no_args()
    uchroot_cmd = uchroot_cmd[__default_opts__(uid, gid)]
    return uchroot_cmd[args]
732,764
Compute the mountpoints of the current user.

Args:
    prefix: Define where the job was running if it ran on a cluster.
    mounts: All mounts the user currently uses in his file system.

Return:
    mntpoints

def mounts(prefix, __mounts):
    i = 0
    mntpoints = []
    for mount in __mounts:
        if not isinstance(mount, dict):
            mntpoint = "{0}/{1}".format(prefix, str(i))
            mntpoints.append(mntpoint)
            i = i + 1
    return mntpoints
732,770
Compute the environment of the change root for the user.

Args:
    mounts: The mountpoints of the current user.

Return:
    paths
    ld_libs

def env(mounts):
    f_mounts = [m.strip("/") for m in mounts]

    root = local.path("/")
    ld_libs = [root / m / "lib" for m in f_mounts]
    ld_libs.extend([root / m / "lib64" for m in f_mounts])

    paths = [root / m / "bin" for m in f_mounts]
    paths.extend([root / m / "sbin" for m in f_mounts])
    paths.extend([root / m for m in f_mounts])
    return paths, ld_libs
732,772
Print a list of projects registered for that experiment.

Args:
    projects: The projects to print, indexed by name.

def print_projects(projects=None):
    grouped_by = {}
    if not projects:
        print(
            "Your selection didn't include any projects for this experiment.")
        return

    for name in projects:
        prj = projects[name]
        if prj.GROUP not in grouped_by:
            grouped_by[prj.GROUP] = []
        grouped_by[prj.GROUP].append("{name}/{group}".format(
            name=prj.NAME, group=prj.GROUP))

    for name in grouped_by:
        print("group: {0}".format(name))
        group_projects = sorted(grouped_by[name])
        for prj in group_projects:
            prj_cls = projects[prj]
            version_str = None
            if hasattr(prj_cls, 'versions'):
                version_str = ", ".join(prj_cls.versions())

            project_id = "{0}/{1}".format(prj_cls.NAME, prj_cls.GROUP)
            project_str = \
                " name: {id:<32} version: {version:<24} source: {src}".format(
                    id=str(project_id),
                    version=str(prj_cls.VERSION),
                    src=str(prj_cls.SRC_FILE))
            print(project_str)

            if prj_cls.__doc__:
                docstr = prj_cls.__doc__.strip("\n ")
                print(" description: {desc}".format(desc=docstr))
            if version_str:
                print(" versions: {versions}".format(versions=version_str))
        print()
732,823
Shell-escape a YAML input string.

Args:
    raw_str: The unescaped string.

def escape_yaml(raw_str: str) -> str:
    escape_list = [char for char in raw_str if char in ['!', '{', '[']]
    if len(escape_list) == 0:
        return raw_str

    str_quotes = '"'
    i_str_quotes = "'"
    if str_quotes in raw_str and str_quotes not in raw_str[1:-1]:
        return raw_str

    if str_quotes in raw_str[1:-1]:
        raw_str = i_str_quotes + raw_str + i_str_quotes
    else:
        raw_str = str_quotes + raw_str + str_quotes
    return raw_str
732,858
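A minimal sketch of the quoting behavior, assuming the function as defined above:

print(escape_yaml("plain value"))    # plain value     (no '!', '{' or '[', returned unchanged)
print(escape_yaml("{override: 1}"))  # "{override: 1}" (wrapped in double quotes)
print(escape_yaml('say "hi" {x}'))   # 'say "hi" {x}'  (inner double quotes force single quotes)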
Create an environment variable from a name and a value.

This generates a shell-compatible representation of an environment
variable that is assigned a YAML representation of a value.

Args:
    env_var (str): Name of the environment variable.
    value (Any): A value we convert from.

def to_env_var(env_var: str, value) -> str:
    val = to_yaml(value)
    ret_val = "%s=%s" % (env_var, escape_yaml(val))
    return ret_val
732,860
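For illustration, assuming ``to_yaml`` (a helper from the surrounding module) renders a value as its YAML string, the result is a shell-ready assignment:

# Hypothetical outputs; ``to_yaml`` comes from the surrounding module.
to_env_var("BB_JOBS", 4)           # 'BB_JOBS=4'
to_env_var("BB_ENV", {"jobs": 4})  # 'BB_ENV="{jobs: 4}"' (quoted by escape_yaml)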
Read a likwid struct from the text stream.

Args:
    fstream: Likwid's filestream.

Returns (dict(str: str)):
    A dict containing all likwid's struct info as key/value pairs.

def read_struct(fstream):
    line = fstream.readline().strip()
    fragments = line.split(",")
    fragments = [x for x in fragments if x is not None]
    partition = dict()
    if not len(fragments) >= 3:
        return None

    partition["struct"] = fragments[0]
    partition["info"] = fragments[1]
    partition["num_lines"] = fragments[2]

    struct = None
    if partition is not None and partition["struct"] == "STRUCT":
        num_lines = int(partition["num_lines"].strip())
        struct = {}
        for _ in range(num_lines):
            cols = fetch_cols(fstream)
            struct.update({cols[0]: cols[1:]})
    return struct
732,885
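A usage sketch with an in-memory stream; ``fetch_cols`` (defined elsewhere in the module) is assumed to read one line and split it on commas:

import io

# Hypothetical likwid fragment: a STRUCT header announcing one data line.
stream = io.StringIO("STRUCT,Region loop,1\nRegionTag,loop\n")
read_struct(stream)  # -> {'RegionTag': ['loop']}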
Read a likwid table info from the text stream.

Args:
    fstream: Likwid's filestream.

Returns (dict(str: str)):
    A dict containing likwid's table info as key/value pairs.

def read_table(fstream):
    pos = fstream.tell()
    line = fstream.readline().strip()
    fragments = line.split(",")
    fragments = [x for x in fragments if x is not None]
    partition = dict()
    if not len(fragments) >= 4:
        return None

    partition["table"] = fragments[0]
    partition["group"] = fragments[1]
    partition["set"] = fragments[2]
    partition["num_lines"] = fragments[3]

    struct = None
    if partition is not None and partition["table"] == "TABLE":
        num_lines = int(partition["num_lines"].strip())
        struct = {}
        header = fetch_cols(fstream)
        struct.update({header[0]: header[1:]})

        for _ in range(num_lines):
            cols = fetch_cols(fstream)
            struct.update({cols[0]: cols[1:]})
    else:
        fstream.seek(pos)
    return struct
732,886
Read all structs from likwid's file stream.

Args:
    fstream: Likwid's output file stream.

Returns:
    A generator that can be used to iterate over all structs in the
    fstream.

def read_structs(fstream):
    struct = read_struct(fstream)
    while struct is not None:
        yield struct
        struct = read_struct(fstream)
732,887
Read all tables from likwid's file stream.

Args:
    fstream: Likwid's output file stream.

Returns:
    A generator that can be used to iterate over all tables in the
    fstream.

def read_tables(fstream):
    table = read_table(fstream)
    while table is not None:
        yield table
        table = read_table(fstream)
732,888
Get the complete measurement info from likwid's region info.

Args:
    region: The region we took a measurement in.
    core_info: The core information.
    data: The raw data.
    extra_offset (int): Offset into each data row before the per-core
        values start (default: 0).

Returns (list((region, metric, core, value))):
    A list of measurement tuples; a tuple contains the information about
    the region, the metric, the core and the actual value.

def get_measurements(region, core_info, data, extra_offset=0):
    measurements = []
    clean_core_info = [x for x in core_info if x]
    cores = len(clean_core_info)
    for k in data:
        if k not in ["1", "Region Info", "Event", "Metric", "CPU clock"]:
            slot = data[k]
            for i in range(cores):
                core = core_info[i]
                idx = extra_offset + i
                if core and slot[idx]:
                    measurements.append((region, k, core, slot[idx]))
    return measurements
732,889
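A worked example of the flattening, with hypothetical likwid data:

region = "main"
core_info = ["Core 0", "Core 1"]
data = {"Region Info": ["..."], "RDTSC Runtime [s]": ["1.5", "1.6"]}
get_measurements(region, core_info, data)
# [('main', 'RDTSC Runtime [s]', 'Core 0', '1.5'),
#  ('main', 'RDTSC Runtime [s]', 'Core 1', '1.6')]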
Get a complete list of all measurements.

Args:
    infile: The filestream containing all likwid output.

Returns:
    A list of all measurements extracted from likwid's file stream.

def perfcounters(infile):
    measurements = []
    with open(infile, 'r') as in_file:
        read_struct(in_file)
        for region_struct in read_structs(in_file):
            region = region_struct["1"][1]
            core_info = region_struct["Region Info"]
            measurements += \
                get_measurements(region, core_info, region_struct)

            for table_struct in read_tables(in_file):
                core_info = None
                if "Event" in table_struct:
                    offset = 1
                    core_info = table_struct["Event"][offset:]
                    measurements += get_measurements(region, core_info,
                                                     table_struct, offset)
                elif "Metric" in table_struct:
                    core_info = table_struct["Metric"]
                    measurements += get_measurements(region, core_info,
                                                     table_struct)
    return measurements
732,890
Create a new 'run_group' in the database.

This creates a new transaction in the database and creates a new run_group
within this transaction. Afterwards we return both the transaction as well
as the run_group itself. The user is responsible for committing it when
the time comes.

Args:
    prj - The project for which we open the run_group.

Returns:
    A tuple (group, session) containing both the newly created run_group
    and the transaction object.

def create_run_group(prj):
    from benchbuild.utils import schema as s

    session = s.Session()
    experiment = prj.experiment
    group = s.RunGroup(id=prj.run_uuid, experiment=experiment.id)
    session.add(group)
    session.commit()

    return (group, session)
732,975
Persist this project in the benchbuild database.

Args:
    project: The project we want to persist.

def persist_project(project):
    from benchbuild.utils.schema import Project, Session
    session = Session()
    projects = session.query(Project) \
        .filter(Project.name == project.name) \
        .filter(Project.group_name == project.group)

    name = project.name
    desc = project.__doc__
    domain = project.domain
    group_name = project.group
    version = project.version() \
        if callable(project.version) else project.version
    try:
        src_url = project.src_uri
    except AttributeError:
        src_url = 'unknown'

    if projects.count() == 0:
        newp = Project()
        newp.name = name
        newp.description = desc
        newp.src_url = src_url
        newp.domain = domain
        newp.group_name = group_name
        newp.version = version
        session.add(newp)
    else:
        newp_value = {
            "name": name,
            "description": desc,
            "src_url": src_url,
            "domain": domain,
            "group_name": group_name,
            "version": version
        }
        projects.update(newp_value)

    session.commit()
    return (projects, session)
732,976
Persist this experiment in the benchbuild database.

Args:
    experiment: The experiment we want to persist.

def persist_experiment(experiment):
    from benchbuild.utils.schema import Experiment, Session
    session = Session()

    cfg_exp = experiment.id
    LOG.debug("Using experiment ID stored in config: %s", cfg_exp)
    exps = session.query(Experiment).filter(Experiment.id == cfg_exp)
    desc = str(CFG["experiment_description"])
    name = experiment.name

    if exps.count() == 0:
        newe = Experiment()
        newe.id = cfg_exp
        newe.name = name
        newe.description = desc
        session.add(newe)
        ret = newe
    else:
        exps.update({'name': name, 'description': desc})
        ret = exps.first()
    try:
        session.commit()
    except IntegrityError:
        session.rollback()
        persist_experiment(experiment)

    return (ret, session)
732,977
Persist the run results in the database.

Args:
    run: The run we attach this timing results to.
    session: The db transaction we belong to.
    timings: The timing measurements we want to store.

def persist_time(run, session, timings):
    from benchbuild.utils import schema as s

    for timing in timings:
        session.add(
            s.Metric(name="time.user_s", value=timing[0], run_id=run.id))
        session.add(
            s.Metric(name="time.system_s", value=timing[1], run_id=run.id))
        session.add(
            s.Metric(name="time.real_s", value=timing[2], run_id=run.id))
732,978
Persist the flamegraph in the database.

The flamegraph exists as a SVG image on disk until we persist it in the
database.

Args:
    run: The run we attach these perf measurements to.
    session: The db transaction we belong to.
    svg_path: The path to the SVG file we want to store.

def persist_perf(run, session, svg_path):
    from benchbuild.utils import schema as s

    with open(svg_path, 'r') as svg_file:
        svg_data = svg_file.read()
        session.add(
            s.Metadata(name="perf.flamegraph", value=svg_data,
                       run_id=run.id))
732,979
Persist the run results in the database.

Args:
    run: The run we attach the compilestats to.
    session: The db transaction we belong to.
    stats: The stats we want to store in the database.

def persist_compilestats(run, session, stats):
    for stat in stats:
        stat.run_id = run.id
        session.add(stat)
732,980
Persist the configuration as key-value pairs.

Args:
    run: The run we attach the config to.
    session: The db transaction we belong to.
    cfg: The configuration we want to persist.

def persist_config(run, session, cfg):
    from benchbuild.utils import schema as s

    for cfg_elem in cfg:
        session.add(
            s.Config(name=cfg_elem, value=cfg[cfg_elem], run_id=run.id))
732,981
Return the current absolute coordinates of the pointer event, transformed
to screen coordinates.

For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`, this method
raises :exc:`AttributeError`.

Args:
    width (int): The current output screen width.
    height (int): The current output screen height.

Returns:
    (float, float): The current absolute (x, y) coordinates transformed
    to screen coordinates.

Raises:
    AttributeError

def transform_absolute_coords(self, width, height):
    if self.type != EventType.POINTER_MOTION_ABSOLUTE:
        raise AttributeError(_wrong_meth.format(self.type))
    abs_x = self._libinput \
        .libinput_event_pointer_get_absolute_x_transformed(
            self._handle, width)
    abs_y = self._libinput \
        .libinput_event_pointer_get_absolute_y_transformed(
            self._handle, height)
    return abs_x, abs_y
733,030
Check if the event has a valid value for the given axis.

If this method returns True for an axis and :meth:`get_axis_value`
returns a value of 0, the event is a scroll stop event.

For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_AXIS`, this method raises
:exc:`AttributeError`.

Args:
    axis (~libinput.constant.PointerAxis): The axis to check.

Returns:
    bool: True if this event contains a value for this axis.

Raises:
    AttributeError

def has_axis(self, axis):
    if self.type != EventType.POINTER_AXIS:
        raise AttributeError(_wrong_meth.format(self.type))
    return self._libinput.libinput_event_pointer_has_axis(
        self._handle, axis)
733,034
Return the current absolute coordinates of the touch event, transformed
to screen coordinates.

For events not of type
:attr:`~libinput.constant.EventType.TOUCH_DOWN` or
:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this method raises
:exc:`AttributeError`.

Args:
    width (int): The current output screen width.
    height (int): The current output screen height.

Returns:
    (float, float): The current absolute (x, y) coordinates transformed
    to screen coordinates.

def transform_coords(self, width, height):
    if self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}:
        raise AttributeError(_wrong_meth.format(self.type))
    x = self._libinput.libinput_event_touch_get_x_transformed(
        self._handle, width)
    y = self._libinput.libinput_event_touch_get_y_transformed(
        self._handle, height)
    return x, y
733,043
Handle SQLAlchemy exceptions in a sane way.

Args:
    func: An arbitrary function to wrap.
    error_is_fatal: Should we exit the program on exception?
    reraise: Should we reraise the exception, after logging? Only makes
        sense if error_is_fatal is False.
    error_messages: A dictionary that assigns an exception class to a
        customized error message.

def exceptions(error_is_fatal=True, error_messages=None):
    def exception_decorator(func):
        nonlocal error_messages

        @functools.wraps(func)
        def exc_wrapper(*args, **kwargs):
            nonlocal error_messages
            try:
                result = func(*args, **kwargs)
            except sa.exc.SQLAlchemyError as err:
                result = None
                details = None
                err_type = err.__class__
                if error_messages and err_type in error_messages:
                    details = error_messages[err_type]
                if details:
                    LOG.error(details)
                LOG.error("For developers: (%s) %s", err.__class__, str(err))
                if error_is_fatal:
                    sys.exit("Abort, SQL operation failed.")
                if not ui.ask(
                        "I can continue at your own risk, do you want that?"):
                    raise err
            return result

        return exc_wrapper

    return exception_decorator
733,099
Collect layers from input svg sources.

Args:
    svg_sources (list): A list of file-like objects, each containing one
        or more XML layers.

Returns:
    (int, int), list: The first item in the tuple is the shape
    ``(width, height)`` of the largest layer, and the second item is a
    list of ``Element`` objects (from the :mod:`lxml.etree` module), one
    per SVG layer.

def get_svg_layers(svg_sources):
    layers = []
    width, height = None, None

    def extract_length(attr):
        'Extract length in pixels.'
        match = CRE_MM_LENGTH.match(attr)
        if match:
            # Length is specified in millimeters.
            return INKSCAPE_PPmm.magnitude * float(match.group('length'))
        else:
            return float(attr)

    for svg_source_i in svg_sources:
        # Parse input file.
        xml_root = etree.parse(svg_source_i)
        svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]
        width = max(extract_length(svg_root.attrib['width']), width)
        height = max(extract_length(svg_root.attrib['height']), height)
        layers += svg_root.xpath('//svg:g[@inkscape:groupmode="layer"]',
                                 namespaces=INKSCAPE_NSMAP)

    for i, layer_i in enumerate(layers):
        layer_i.attrib['id'] = 'layer%d' % (i + 1)
    return (width, height), layers
733,160
Merge layers from input svg sources into a single XML document.

Args:
    svg_sources (list): A list of file-like objects, each containing one
        or more XML layers.
    share_transform (bool): If exactly one layer has a transform, apply
        it to *all* other layers as well.

Returns:
    StringIO.StringIO: File-like object containing merged XML document.

def merge_svg_layers(svg_sources, share_transform=True):
    # Get list of XML layers.
    (width, height), layers = get_svg_layers(svg_sources)

    if share_transform:
        transforms = [layer_i.attrib['transform'] for layer_i in layers
                      if 'transform' in layer_i.attrib]
        if len(transforms) > 1:
            raise ValueError('Transform can only be shared if *exactly one* '
                             'layer has a transform ({} layers have '
                             '`transform` attributes)'
                             .format(len(transforms)))
        elif transforms:
            # Apply single common transform to all layers.
            for layer_i in layers:
                layer_i.attrib['transform'] = transforms[0]

    # Create blank XML output document.
    dwg = svgwrite.Drawing(profile='tiny', debug=False,
                           size=(width, height))

    # Append layers to output XML root element.
    output_svg_root = etree.fromstring(dwg.tostring())
    output_svg_root.extend(layers)

    # Write merged XML document to output file-like object.
    output = StringIO.StringIO()
    output.write(etree.tostring(output_svg_root))
    output.seek(0)
    return output
733,161
Checks if a container exists and is unpacked.

Args:
    container: The container to validate (provides ``filename``).
    path: The location where the container is expected.

Returns:
    True if the container is valid, False if the container needs to be
    unpacked or if the path does not exist yet.

def is_valid(container, path):
    try:
        tmp_hash_path = container.filename + ".hash"
        with open(tmp_hash_path, 'r') as tmp_file:
            tmp_hash = tmp_file.readline()
    except IOError:
        LOG.info("No .hash-file in the tmp-directory.")
        # Without a hash to compare against we cannot validate the
        # container.
        return False

    container_hash_path = local.path(path) / "gentoo.tar.bz2.hash"
    if container_hash_path.exists():
        with open(container_hash_path, 'r') as hash_file:
            container_hash = hash_file.readline()
        return container_hash == tmp_hash
    return False
733,472
Unpack a container usable by uchroot.

Method that checks if a directory for the container exists, checks if
erlent support is needed and then unpacks the container accordingly.

Args:
    path: The location where the container is, that needs to be unpacked.

def unpack(container, path):
    from benchbuild.utils.run import run
    from benchbuild.utils.uchroot import no_args

    path = local.path(path)
    c_filename = local.path(container.filename)
    name = c_filename.basename

    if not path.exists():
        path.mkdir()

    with local.cwd(path):
        Wget(container.remote, name)

        uchroot = no_args()
        uchroot = uchroot["-E", "-A", "-C", "-r", "/", "-w",
                          os.path.abspath("."), "--"]

        # Check, if we need erlent support for this archive.
        has_erlent = bash[
            "-c", "tar --list -f './{0}' | grep --silent '.erlent'".format(
                name)]
        has_erlent = (has_erlent & TF)

        untar = local["/bin/tar"]["xf", "./" + name]
        if not has_erlent:
            untar = uchroot[untar]

        run(untar["--exclude=dev/*"])
        if not os.path.samefile(name, container.filename):
            rm(name)
        else:
            LOG.warning("File contents do not match: %s != %s", name,
                        container.filename)
        cp(container.filename + ".hash", path)
733,473
Begin a run_group in the database.

A run_group groups a set of runs for a given project. This models a
series of runs that form a complete binary runtime test.

Args:
    project: The project we begin a new run_group for.

Returns:
    ``(group, session)`` where group is the created group in the database
    and session is the database session this group lives in.

def begin_run_group(project):
    from benchbuild.utils.db import create_run_group
    from datetime import datetime

    group, session = create_run_group(project)
    group.begin = datetime.now()
    group.status = 'running'

    session.commit()
    return group, session
733,485
End the run_group successfully.

Args:
    group: The run_group we want to complete.
    session: The database transaction we will finish.

def end_run_group(group, session):
    from datetime import datetime

    group.end = datetime.now()
    group.status = 'completed'
    session.commit()
733,486
End the run_group unsuccessfully.

Args:
    group: The run_group we want to complete.
    session: The database transaction we will finish.

def fail_run_group(group, session):
    from datetime import datetime

    group.end = datetime.now()
    group.status = 'failed'
    session.commit()
733,487
Generate a single exit code from a list of RunInfo objects.

Takes a list of RunInfos and returns the exit code that is furthest away
from 0.

Args:
    run_infos (t.List[RunInfo]): The run infos to aggregate. A single
        RunInfo is accepted as well.

Returns:
    int: The exit code furthest away from 0.

def exit_code_from_run_infos(run_infos: t.List[RunInfo]) -> int:
    assert run_infos is not None

    if not hasattr(run_infos, "__iter__"):
        return run_infos.retcode

    rcs = [ri.retcode for ri in run_infos]
    max_rc = max(rcs)
    min_rc = min(rcs)
    if max_rc == 0:
        return min_rc
    return max_rc
733,488
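A worked example of the aggregation, using stand-in objects since only a ``retcode`` attribute is needed here (the real RunInfo lives in benchbuild):

from types import SimpleNamespace

runs = [SimpleNamespace(retcode=rc) for rc in (0, 1, 0)]
exit_code_from_run_infos(runs)   # 1  (the maximum is nonzero, so it wins)

runs = [SimpleNamespace(retcode=rc) for rc in (0, -2)]
exit_code_from_run_infos(runs)   # -2 (the maximum is 0, so the minimum is reported)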
Recursively updates the environment of cmd and all its subcommands.

Args:
    cmd - A plumbum command-like object
    **envvars - The environment variables to update

Returns:
    The updated command.

def with_env_recursive(cmd, **envvars):
    from plumbum.commands.base import BoundCommand, BoundEnvCommand
    if isinstance(cmd, BoundCommand):
        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)
    elif isinstance(cmd, BoundEnvCommand):
        cmd.envvars.update(envvars)
        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)
    return cmd
733,490
Decorate a project phase with a local working directory change.

Args:
    sub: An optional subdirectory to change into.

def in_builddir(sub='.'):
    from functools import wraps

    def wrap_in_builddir(func):
        @wraps(func)
        def wrap_in_builddir_func(self, *args, **kwargs):
            p = local.path(self.builddir) / sub
            if not p.exists():
                LOG.error("%s does not exist.", p)
            if p == local.cwd:
                LOG.debug("CWD already is %s", p)
                return func(self, *args, **kwargs)
            with local.cwd(p):
                return func(self, *args, **kwargs)

        return wrap_in_builddir_func

    return wrap_in_builddir
733,491
Pack a container image into a .tar.bz2 archive.

Args:
    in_container (str): Path string to the container image.
    out_file (str): Output file name.

def pack_container(in_container, out_file):
    container_filename = local.path(out_file).basename
    out_container = local.cwd / "container-out" / container_filename
    out_dir = out_container.dirname

    # Pack the results to: container-out
    with local.cwd(in_container):
        tar("cjf", out_container, ".")

    c_hash = download.update_hash(out_container)
    # Create the output directory before moving the results there.
    if not out_dir.exists():
        mkdir("-p", out_dir)
    mv(out_container, out_file)
    mv(out_container + ".hash", out_file + ".hash")

    new_container = {"path": out_file, "hash": str(c_hash)}
    CFG["container"]["known"] += new_container
733,497
Decorator for registering handlers that convert text dates to dates.

Args:
    date_specifier_patterns (str): the date specifier (in regex pattern
        format) for which the handler is registered

def register_date_conversion_handler(date_specifier_patterns):
    def _decorator(func):
        global DATE_SPECIFIERS_CONVERSION_HANDLERS
        DATE_SPECIFIERS_CONVERSION_HANDLERS[
            DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func
        return func

    return _decorator
733,649
Calculates the next date from the given partial date.

Args:
    partial_date (inspire_utils.date.PartialDate): The partial date whose
        next date should be calculated.

Returns:
    PartialDate: The next date from the given partial date.

def _get_next_date_from_partial_date(partial_date):
    relativedelta_arg = 'years'

    if partial_date.month:
        relativedelta_arg = 'months'
    if partial_date.day:
        relativedelta_arg = 'days'

    next_date = parse(partial_date.dumps()) + \
        relativedelta(**{relativedelta_arg: 1})
    return PartialDate.from_parts(
        next_date.year,
        next_date.month if partial_date.month else None,
        next_date.day if partial_date.day else None
    )
733,655
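The granularity of the partial date decides the step size. Hypothetical round-trips, assuming ``PartialDate`` accepts year/month/day parts directly:

_get_next_date_from_partial_date(PartialDate(2018))         # -> 2019
_get_next_date_from_partial_date(PartialDate(2018, 12))     # -> 2019-01
_get_next_date_from_partial_date(PartialDate(2018, 2, 28))  # -> 2018-03-01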
Helper for wrapping a query into a nested query if the fields within the
query are nested.

Args:
    query: The query to be wrapped.
    field: The field that is being queried.
    nested_fields: List of fields which are nested.

Returns:
    (dict): The nested query

def wrap_query_in_nested_if_field_is_nested(query, field, nested_fields):
    for element in nested_fields:
        match_pattern = r'^{}.'.format(element)
        if re.match(match_pattern, field):
            return generate_nested_query(element, query)

    return query
733,660
Returns a tree representation of a parse tree.

Arguments:
    tree: the parse tree whose tree representation is to be generated
    verbose (bool): if True, prints the parse tree to be formatted

Returns:
    str: tree-like representation of the parse tree

def emit_tree_format(tree, verbose=False):
    if verbose:
        print("Converting: " + repr(tree))
    ret_str = __recursive_formatter(tree)
    return ret_str
733,738
Drives the whole logic, by parsing, restructuring and finally, generating
an ElasticSearch query.

Args:
    query_str (six.text_types): the given query to be translated to an
        ElasticSearch query

Returns:
    six.text_types: Return an ElasticSearch query.

Notes:
    In case there's an error, an ElasticSearch `multi_match` query is
    generated with its `query` value, being the query_str argument.

def parse_query(query_str):
    def _generate_match_all_fields_query():
        # Strip colon character (special character for ES)
        stripped_query_str = ' '.join(query_str.replace(':', ' ').split())
        return {'multi_match': {'query': stripped_query_str,
                                'fields': ['_all'],
                                'zero_terms_query': 'all'}}

    if not isinstance(query_str, six.text_type):
        query_str = six.text_type(query_str.decode('utf-8'))

    logger.info('Parsing: "' + query_str + '".')

    parser = StatefulParser()
    rst_visitor = RestructuringVisitor()
    es_visitor = ElasticSearchVisitor()

    try:
        unrecognized_text, parse_tree = parser.parse(query_str, Query)

        if unrecognized_text:  # Usually, should never happen.
            msg = 'Parser returned unrecognized text: "' + \
                unrecognized_text + '" for query: "' + query_str + '".'

            if query_str == unrecognized_text and parse_tree is None:
                # Didn't recognize anything.
                logger.warn(msg)
                return _generate_match_all_fields_query()
            else:
                msg += 'Continuing with recognized parse tree.'
                logger.warn(msg)

    except SyntaxError as e:
        logger.warn('Parser syntax error (' + six.text_type(e) +
                    ') with query: "' + query_str +
                    '". Continuing with a match_all with the given query.')
        return _generate_match_all_fields_query()

    # Try-Catch-all exceptions for visitors, so that search functionality
    # never fails for the user.
    try:
        restructured_parse_tree = parse_tree.accept(rst_visitor)
        logger.debug('Parse tree: \n' +
                     emit_tree_format(restructured_parse_tree))
    except Exception as e:
        logger.exception(
            RestructuringVisitor.__name__ + " crashed" +
            (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
        )
        return _generate_match_all_fields_query()

    try:
        es_query = restructured_parse_tree.accept(es_visitor)
    except Exception as e:
        logger.exception(
            ElasticSearchVisitor.__name__ + " crashed" +
            (": " + six.text_type(e) + ".") if six.text_type(e) else '.'
        )
        return _generate_match_all_fields_query()

    if not es_query:
        # Case where an empty query was generated (i.e. date query with
        # malformed date, e.g. "d < 200").
        return _generate_match_all_fields_query()

    return es_query
733,753
Generates a query on the ``_all`` field with all the query content.

Args:
    data (six.text_type or list): The query in the format of
        ``six.text_type`` (when used from parsing driver) or ``list``
        when used from within the ES visitor.

def _generate_malformed_query(data):
    if isinstance(data, six.text_type):
        # Remove colon character (special character for ES)
        query_str = data.replace(':', ' ')
    else:
        query_str = ' '.join([word.strip(':') for word in data.children])

    return {
        'simple_query_string': {
            'fields': ['_all'],
            'query': query_str
        }
    }
733,824
Transforms the given journal query value (old publication info) to the
new one.

Args:
    third_journal_field (six.text_type): The final field to be used for
        populating the old publication info.
    old_publication_info_values (six.text_type): The old publication
        info. It must be one of {only title, title & volume, title &
        volume & artid/page_start}.

Returns:
    (dict) The new publication info.

def _preprocess_journal_query_value(third_journal_field,
                                    old_publication_info_values):
    # Prepare old publication info for
    # :meth:`inspire_schemas.utils.convert_old_publication_info_to_new`.
    publication_info_keys = [
        ElasticSearchVisitor.JOURNAL_TITLE,
        ElasticSearchVisitor.JOURNAL_VOLUME,
        third_journal_field,
    ]
    values_list = [
        value.strip()
        for value in old_publication_info_values.split(',')
        if value
    ]

    old_publication_info = [
        {
            key: value
            for key, value in zip(publication_info_keys, values_list)
            if value
        }
    ]

    # We are always assuming that the returned list will not be empty. In
    # the situation of a journal query with no value, a malformed query
    # will be generated instead.
    new_publication_info = \
        convert_old_publication_info_to_new(old_publication_info)[0]
    return new_publication_info
733,825
Synchronises the schedule specified by the ID `schedule_id` to the
scheduler service.

Arguments:
    schedule_id {str} -- The ID of the schedule to sync

def run(self, schedule_id, **kwargs):
    log = self.get_logger(**kwargs)

    try:
        schedule = Schedule.objects.get(id=schedule_id)
    except Schedule.DoesNotExist:
        log.error("Missing Schedule %s", schedule_id, exc_info=True)
        # Nothing to sync; bail out (otherwise ``schedule`` would be
        # unbound below).
        return

    if schedule.scheduler_schedule_id is None:
        # Create the new schedule
        result = self.scheduler.create_schedule(schedule.scheduler_format)
        schedule.scheduler_schedule_id = result["id"]
        # Disable update signal here to avoid calling twice
        post_save.disconnect(schedule_saved, sender=Schedule)
        schedule.save(update_fields=("scheduler_schedule_id",))
        post_save.connect(schedule_saved, sender=Schedule)
        log.info(
            "Created schedule %s on scheduler for schedule %s",
            schedule.scheduler_schedule_id,
            schedule.id,
        )
    else:
        # Update the existing schedule
        result = self.scheduler.update_schedule(
            str(schedule.scheduler_schedule_id), schedule.scheduler_format
        )
        log.info(
            "Updated schedule %s on scheduler for schedule %s",
            schedule.scheduler_schedule_id,
            schedule.id,
        )
733,847
Deactivates the schedule specified by the ID `scheduler_schedule_id` in
the scheduler service.

Arguments:
    scheduler_schedule_id {str} -- The ID of the schedule to deactivate

def run(self, scheduler_schedule_id, **kwargs):
    log = self.get_logger(**kwargs)
    self.scheduler.update_schedule(scheduler_schedule_id, {"active": False})
    log.info(
        "Deactivated schedule %s in the scheduler service",
        scheduler_schedule_id
    )
733,848
Calculates the expected lifecycle position for the subscription specified
by ``subscription_id``, and creates a BehindSubscription entry for it if
it is behind.

Args:
    subscription_id (str): ID of subscription to calculate lifecycle for

def calculate_subscription_lifecycle(subscription_id):
    subscription = Subscription.objects.select_related(
        "messageset", "schedule").get(id=subscription_id)
    behind = subscription.messages_behind()
    if behind == 0:
        return

    current_messageset = subscription.messageset
    current_sequence_number = subscription.next_sequence_number
    end_subscription = Subscription.fast_forward_lifecycle(
        subscription, save=False)[-1]
    BehindSubscription.objects.create(
        subscription=subscription,
        messages_behind=behind,
        current_messageset=current_messageset,
        current_sequence_number=current_sequence_number,
        expected_messageset=end_subscription.messageset,
        expected_sequence_number=end_subscription.next_sequence_number,
    )
733,870
Fires off the celery task to ensure that this schedule is in the
scheduler.

Arguments:
    sender {class} -- The model class, always Schedule
    instance {Schedule} -- The instance of the Schedule that we want to
        sync

def schedule_saved(sender, instance, **kwargs):
    from contentstore.tasks import sync_schedule

    sync_schedule.delay(str(instance.id))
734,092
Fires off the celery task to ensure that this schedule is deactivated.

Arguments:
    sender {class} -- The model class, always Schedule
    instance {Schedule} -- The instance of the schedule that we want to
        deactivate

def schedule_deleted(sender, instance, **kwargs):
    from contentstore.tasks import deactivate_schedule

    deactivate_schedule.delay(str(instance.scheduler_schedule_id))
734,093
Create a TAXII endpoint.

Args:
    user (str): username for authentication (optional)
    password (str): password for authentication (optional)
    verify (bool): validate the entity credentials (default: True)
    conn (_HTTPConnection): A connection to reuse (optional)
    proxies (dict): key/value pair for http/https proxy settings
        (optional)

def __init__(self, url, conn=None, user=None, password=None, verify=True,
             proxies=None):
    if conn and (user or password):
        raise InvalidArgumentsError("A connection and user/password may"
                                    " not both be provided.")
    elif conn:
        self._conn = conn
    else:
        self._conn = _HTTPConnection(user, password, verify, proxies)

    # Add trailing slash to TAXII endpoint if missing
    # https://github.com/oasis-open/cti-taxii-client/issues/50
    if url[-1] == "/":
        self.url = url
    else:
        self.url = url + "/"
734,269
Polls the URL to grab the latest status resource, within a given timeout
and at a given time interval.

Args:
    poll_interval (int): how often to poll the status service.
    timeout (int): how long to poll the URL until giving up. Use <= 0
        to wait forever

def wait_until_final(self, poll_interval=1, timeout=60):
    start_time = time.time()
    elapsed = 0
    while (self.status != "complete" and
            (timeout <= 0 or elapsed < timeout)):
        time.sleep(poll_interval)
        self.refresh()
        elapsed = time.time() - start_time
734,272
Check that the server is returning a valid Content-Type.

Args:
    content_type (str): ``Content-Type:`` header value
    accept (str): media type to include in the ``Accept:`` header.

def valid_content_type(self, content_type, accept):
    accept_tokens = accept.replace(' ', '').split(';')
    content_type_tokens = content_type.replace(' ', '').split(';')

    return (
        all(elem in content_type_tokens for elem in accept_tokens) and
        (content_type_tokens[0] == 'application/vnd.oasis.taxii+json' or
         content_type_tokens[0] == 'application/vnd.oasis.stix+json')
    )
734,296
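A sketch of the token matching, where ``resp`` stands for any object exposing this method; the media types follow the TAXII 2.0 conventions used above:

accept = 'application/vnd.oasis.taxii+json; version=2.0'
# Every token from ``accept`` appears in the Content-Type and the base
# media type is a TAXII one, so this validates.
resp.valid_content_type('application/vnd.oasis.taxii+json; version=2.0',
                        accept)                # True
resp.valid_content_type('text/html', accept)  # False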
Compute :math:`\frac{\partial B}{\partial s}`.

.. note::

   This is a helper for :func:`_jacobian_both`, which has an equivalent
   Fortran implementation.

Args:
    nodes (numpy.ndarray): Array of nodes in a surface.
    degree (int): The degree of the surface.
    dimension (int): The dimension the surface lives in.

Returns:
    numpy.ndarray: Nodes of the Jacobian surface in B |eacute| zier form.

def jacobian_s(nodes, degree, dimension):
    num_nodes = (degree * (degree + 1)) // 2
    result = np.empty((dimension, num_nodes), order="F")

    index = 0
    i = 0
    for num_vals in six.moves.xrange(degree, 0, -1):
        for _ in six.moves.xrange(num_vals):
            result[:, index] = nodes[:, i + 1] - nodes[:, i]
            # Update the indices
            index += 1
            i += 1
        # In between each row, the index gains an extra value.
        i += 1

    return float(degree) * result
734,728
Compute :math:`s` and :math:`t` partial of :math:`B`.

.. note::

   There is also a Fortran implementation of this function, which will
   be used if it can be built.

Args:
    nodes (numpy.ndarray): Array of nodes in a surface.
    degree (int): The degree of the surface.
    dimension (int): The dimension the surface lives in.

Returns:
    numpy.ndarray: Nodes of the Jacobian surfaces in B |eacute| zier form.

def _jacobian_both(nodes, degree, dimension):
    _, num_nodes = nodes.shape
    result = np.empty((2 * dimension, num_nodes - degree - 1), order="F")
    result[:dimension, :] = jacobian_s(nodes, degree, dimension)
    result[dimension:, :] = jacobian_t(nodes, degree, dimension)
    return result
734,729
Compute the nodes of each edge of a surface.

.. note::

   There is also a Fortran implementation of this function, which will
   be used if it can be built.

Args:
    nodes (numpy.ndarray): Control point nodes that define the surface.
    degree (int): The degree of the surface defined by ``nodes``.

Returns:
    Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]: The nodes in the
    edges of the surface.

def _compute_edge_nodes(nodes, degree):
    dimension, _ = np.shape(nodes)

    nodes1 = np.empty((dimension, degree + 1), order="F")
    nodes2 = np.empty((dimension, degree + 1), order="F")
    nodes3 = np.empty((dimension, degree + 1), order="F")

    curr2 = degree
    curr3 = -1
    for i in six.moves.xrange(degree + 1):
        nodes1[:, i] = nodes[:, i]
        nodes2[:, i] = nodes[:, curr2]
        nodes3[:, i] = nodes[:, curr3]
        # Update the indices.
        curr2 += degree - i
        curr3 -= i + 2

    return nodes1, nodes2, nodes3
734,750
Save an image to the docs images directory.

Args:
    filename (str): The name of the file (not containing directory info).

def save_image(figure, filename):
    path = os.path.join(IMAGES_DIR, filename)
    figure.savefig(path, bbox_inches="tight")
    plt.close(figure)
734,758
Fill out the columns of a matrix with a series of points.

This is because ``np.hstack()`` will just make another 1D vector out of
them and ``np.vstack()`` will put them in the rows.

Args:
    points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e. arrays
        with shape ``(2,)``).

Returns:
    numpy.ndarray: The array with each point in ``points`` as its columns.

def stack1d(*points):
    result = np.empty((2, len(points)), order="F")
    for index, point in enumerate(points):
        result[:, index] = point
    return result
734,759
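A quick check of the stacking behavior:

import numpy as np

p0 = np.asfortranarray([0.0, 0.0])
p1 = np.asfortranarray([1.0, 2.0])
stack1d(p0, p1)
# array([[0., 1.],
#        [0., 2.]])  -- each input point becomes a column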
Checks if the current build is ``gfortran`` on macOS.

Args:
    f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran
        compiler instance.

Returns:
    bool: Only :data:`True` if

    * Current OS is macOS (checked via ``sys.platform``).
    * ``f90_compiler`` corresponds to ``gfortran``.

def is_macos_gfortran(f90_compiler):
    # NOTE: NumPy may not be installed, but we don't want **this** module
    #       to cause an import failure.
    from numpy.distutils.fcompiler import gnu

    # Only macOS.
    if sys.platform != MAC_OS:
        return False

    # Only ``gfortran``.
    if not isinstance(f90_compiler, gnu.Gnu95FCompiler):
        return False

    return True
734,818
Patch up ``f90_compiler.library_dirs``.

On macOS, a Homebrew installed ``gfortran`` needs some help. The
``numpy.distutils`` "default" constructor for ``Gnu95FCompiler`` only has
a single library search path, but there are many library paths included
in the full ``gcc`` install.

Args:
    f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran
        compiler instance.

def patch_f90_compiler(f90_compiler):
    if not is_macos_gfortran(f90_compiler):
        return

    library_dirs = f90_compiler.library_dirs
    # ``library_dirs`` is a list (i.e. mutable), so we can update in place.
    library_dirs[:] = setup_helpers.gfortran_search_path(library_dirs)
734,819
Get the degree of the current surface.

Args:
    num_nodes (int): The number of control points for a
        B |eacute| zier surface.

Returns:
    int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2` equals
    ``num_nodes``.

Raises:
    ValueError: If ``num_nodes`` isn't a triangular number.

def _get_degree(num_nodes):
    # 8 * num_nodes = 4(d + 1)(d + 2)
    #               = 4d^2 + 12d + 8
    #               = (2d + 3)^2 - 1
    d_float = 0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0)
    d_int = int(np.round(d_float))
    if (d_int + 1) * (d_int + 2) == 2 * num_nodes:
        return d_int
    else:
        raise ValueError(num_nodes, "not a triangular number")
734,823
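A worked instance of the inversion: a cubic surface (d = 3) has (3 + 1)(3 + 2)/2 = 10 nodes, and plugging ``num_nodes = 10`` back in recovers the degree:

# d_float = 0.5 * (sqrt(8 * 10 + 1) - 3) = 0.5 * (9 - 3) = 3.0
_get_degree(10)  # 3
_get_degree(7)   # raises ValueError: (7, 'not a triangular number')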
Verifies that a point is in the reference triangle.

I.e., checks that they sum to <= one and are each non-negative.

Args:
    s (float): Parameter along the reference triangle.
    t (float): Parameter along the reference triangle.

Raises:
    ValueError: If the point lies outside the reference triangle.

def _verify_cartesian(s, t):
    if s < 0.0 or t < 0.0 or s + t > 1.0:
        raise ValueError("Point lies outside reference triangle", s, t)
734,831
Get the library directory paths for ``gfortran``.

Looks for ``libraries: =`` in the output of
``gfortran -print-search-dirs`` and then parses the paths. If this fails
for any reason, this method will print an error and return
``library_dirs``.

Args:
    library_dirs (List[str]): Existing library directories.

Returns:
    List[str]: The library directories for ``gfortran``.

def gfortran_search_path(library_dirs):
    cmd = ("gfortran", "-print-search-dirs")
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    return_code = process.wait()
    # Bail out if the command failed.
    if return_code != 0:
        return library_dirs

    cmd_output = process.stdout.read().decode("utf-8")
    # Find single line starting with ``libraries: ``.
    search_lines = cmd_output.strip().split("\n")
    library_lines = [
        line[len(FORTRAN_LIBRARY_PREFIX):]
        for line in search_lines
        if line.startswith(FORTRAN_LIBRARY_PREFIX)
    ]
    if len(library_lines) != 1:
        msg = GFORTRAN_MISSING_LIBS.format(cmd_output)
        print(msg, file=sys.stderr)
        return library_dirs

    # Go through each library in the ``libraries: = ...`` line.
    library_line = library_lines[0]
    accepted = set(library_dirs)
    for part in library_line.split(os.pathsep):
        full_path = os.path.abspath(part.strip())
        if os.path.isdir(full_path):
            accepted.add(full_path)
        else:
            # Ignore anything that isn't a directory.
            msg = GFORTRAN_BAD_PATH.format(full_path)
            print(msg, file=sys.stderr)

    return sorted(accepted)
734,851
Update a given set of compiler flags.

Args:
    compiler_flags (List[str]): Existing flags associated with a compiler.
    remove_flags (Optional[Container[str]]): A container of flags to
        remove that will override any of the defaults.

Returns:
    List[str]: The modified list (i.e. some flags added and some removed).

def _update_flags(compiler_flags, remove_flags=()):
    for flag in GFORTRAN_SHARED_FLAGS:
        if flag not in compiler_flags:
            compiler_flags.append(flag)

    if DEBUG_ENV in os.environ:
        to_add = GFORTRAN_DEBUG_FLAGS
        to_remove = GFORTRAN_OPTIMIZE_FLAGS
    else:
        to_add = GFORTRAN_OPTIMIZE_FLAGS
        if os.environ.get(WHEEL_ENV) is None:
            to_add += (GFORTRAN_NATIVE_FLAG,)
        to_remove = GFORTRAN_DEBUG_FLAGS

    for flag in to_add:
        if flag not in compiler_flags:
            compiler_flags.append(flag)

    return [
        flag
        for flag in compiler_flags
        if not (flag in to_remove or flag in remove_flags)
    ]
734,853
Create a static library (i.e. a ``.a`` / ``.lib`` file).

Args:
    obj_files (List[str]): List of paths of compiled object files.

def _default_static_lib(self, obj_files):
    c_compiler = self.F90_COMPILER.c_compiler

    static_lib_dir = os.path.join(self.build_lib, "bezier", "lib")
    if not os.path.exists(static_lib_dir):
        os.makedirs(static_lib_dir)
    c_compiler.create_static_lib(
        obj_files, "bezier", output_dir=static_lib_dir
    )

    # NOTE: We must "modify" the paths for the ``extra_objects`` in each
    #       extension since they were compiled with
    #       ``output_dir=self.build_temp``.
    for extension in self.extensions:
        extension.extra_objects[:] = [
            os.path.join(self.build_temp, rel_path)
            for rel_path in extension.extra_objects
        ]
734,862
Verify a pair of sides share an endpoint.

.. note::

   This currently checks that edge endpoints match **exactly** but
   allowing some roundoff may be desired.

Args:
    prev (.Curve): "Previous" curve at piecewise junction.
    curr (.Curve): "Next" curve at piecewise junction.

Raises:
    ValueError: If the previous side is not in 2D.
    ValueError: If consecutive sides don't share an endpoint.

def _verify_pair(prev, curr):
    if prev._dimension != 2:
        raise ValueError("Curve not in R^2", prev)

    end = prev._nodes[:, -1]
    start = curr._nodes[:, 0]
    if not _helpers.vector_close(end, start):
        raise ValueError(
            "Not sufficiently close",
            "Consecutive sides do not have common endpoint",
            prev,
            curr,
        )
734,867
Plot the current curved polygon.

Args:
    pts_per_edge (int): Number of points to plot per curved edge.
    color (Optional[Tuple[float, float, float]]): Color as RGB profile.
    ax (Optional[matplotlib.artist.Artist]): matplotlib axis object to
        add plot to.

Returns:
    matplotlib.artist.Artist: The axis containing the plot. This may be
    a newly created axis.

def plot(self, pts_per_edge, color=None, ax=None):
    if ax is None:
        ax = _plot_helpers.new_axis()

    _plot_helpers.add_patch(ax, color, pts_per_edge, *self._edges)
    return ax
734,870
Post-process a generated journal file on Travis macOS.

Args:
    journal_filename (str): The name of the journal file.

def post_process_travis_macos(journal_filename):
    travis_build_dir = os.environ.get("TRAVIS_BUILD_DIR", "")
    with open(journal_filename, "r") as file_obj:
        content = file_obj.read()

    processed = content.replace(travis_build_dir, "${TRAVIS_BUILD_DIR}")
    with open(journal_filename, "w") as file_obj:
        file_obj.write(processed)
734,876
Convert Sphinx ``:mod:`` to plain reST link.

Args:
    match (_sre.SRE_Match): A match (from ``re``) to be used in
        substitution.
    sphinx_modules (list): List used to track the modules that have been
        encountered.

Returns:
    str: The ``match`` converted to a link.

def mod_replace(match, sphinx_modules):
    sphinx_modules.append(match.group("module"))
    return "`{}`_".format(match.group("value"))
734,878
Convert Sphinx ``:doc:`` to plain reST link.

Args:
    match (_sre.SRE_Match): A match (from ``re``) to be used in
        substitution.
    sphinx_docs (list): List used to track the documents that have been
        encountered.

Returns:
    str: The ``match`` converted to a link.

def doc_replace(match, sphinx_docs):
    sphinx_docs.append(match.group("path"))
    return "`{}`_".format(match.group("value"))
734,879
Get a diff between two strings.

Args:
    value1 (str): First string to be compared.
    value2 (str): Second string to be compared.
    name1 (str): Name of the first string.
    name2 (str): Name of the second string.

Returns:
    str: The full diff.

def get_diff(value1, value2, name1, name2):
    lines1 = [line + "\n" for line in value1.splitlines()]
    lines2 = [line + "\n" for line in value2.splitlines()]

    diff_lines = difflib.context_diff(
        lines1, lines2, fromfile=name1, tofile=name2
    )
    return "".join(diff_lines)
734,880
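For example:

print(get_diff("a\nb\n", "a\nc\n", "left.txt", "right.txt"))
# *** left.txt
# --- right.txt
# ***************
# *** 1,2 ****
#   a
# ! b
# --- 1,2 ----
#   a
# ! c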
Updates the template so that curly braces are escaped correctly.

Args:
    content (str): The template for ``docs/index.rst.release.template``.

Returns:
    str: The updated template with properly escaped curly braces.

def release_docs_side_effect(content):
    # First replace **all** curly braces.
    result = content.replace("{", "{{").replace("}", "}}")
    # Then reset the actual template arguments.
    result = result.replace("{{version}}", "{version}")
    result = result.replace("{{circleci_build}}", "{circleci_build}")
    result = result.replace("{{travis_build}}", "{travis_build}")
    result = result.replace("{{appveyor_build}}", "{appveyor_build}")
    result = result.replace("{{coveralls_build}}", "{coveralls_build}")
    return result
734,885
Helper for :func:`evaluate` when ``nodes`` is degree 3.

Args:
    nodes (numpy.ndarray): ``2 x 4`` array of nodes in a curve.
    x_val (float): ``x``-coordinate for evaluation.
    y_val (float): ``y``-coordinate for evaluation.

Returns:
    float: The computed value of :math:`f(x, y)`.

def _evaluate3(nodes, x_val, y_val):
    # NOTE: This may be (a) slower and (b) less precise than
    #       hard-coding the determinant.
    sylvester_mat = np.zeros((6, 6), order="F")
    delta = nodes - np.asfortranarray([[x_val], [y_val]])
    delta[:, 1:3] *= 3.0
    # Swap rows/columns so that x-y are right next to each other.
    # This will only change the determinant up to a sign.
    sylvester_mat[:2, :4] = delta
    sylvester_mat[2:4, 1:5] = delta
    sylvester_mat[4:, 2:] = delta
    return np.linalg.det(sylvester_mat)
734,889
Compute roots of a polynomial in the unit interval.

Args:
    coeffs (numpy.ndarray): A 1D array (size ``d + 1``) of coefficients
        in monomial / power basis.

Returns:
    numpy.ndarray: ``N``-array of real values in
    :math:`\left[0, 1\right]`.

def roots_in_unit_interval(coeffs):
    all_roots = polynomial.polyroots(coeffs)
    # Only keep roots inside or very near to the unit interval.
    all_roots = all_roots[
        (_UNIT_INTERVAL_WIGGLE_START < all_roots.real) &
        (all_roots.real < _UNIT_INTERVAL_WIGGLE_END)
    ]
    # Only keep roots with very small imaginary part. (Really only
    # keep the real parts.)
    real_inds = np.abs(all_roots.imag) < _IMAGINARY_WIGGLE
    return all_roots[real_inds].real
734,908
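For example, for :math:`p(s) = s^2 - s/2 = s(s - 1/2)` with coefficients in increasing power order (and assuming the module's wiggle constants admit the endpoint root at 0):

import numpy as np

roots_in_unit_interval(np.asfortranarray([0.0, -0.5, 1.0]))
# array([0. , 0.5])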
Find the parameter corresponding to a point on a curve.

.. note::

   This assumes that the curve :math:`B(s, t)` defined by ``nodes`` lives
   in :math:`\mathbf{R}^2`.

Args:
    nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.
    x_val (float): The :math:`x`-coordinate of the point.
    y_val (float): The :math:`y`-coordinate of the point.

Returns:
    Optional[float]: The parameter on the curve (if it exists).

def locate_point(nodes, x_val, y_val):
    # First, reduce to the true degree of x(s) and y(s).
    zero1 = _curve_helpers.full_reduce(nodes[[0], :]) - x_val
    zero2 = _curve_helpers.full_reduce(nodes[[1], :]) - y_val

    # Make sure we have the lowest degree in front, to make the polynomial
    # solve have the fewest number of roots.
    if zero1.shape[1] > zero2.shape[1]:
        zero1, zero2 = zero2, zero1

    # If the "smallest" is a constant, we can't find any roots from it.
    if zero1.shape[1] == 1:
        # NOTE: We assume that callers won't pass ``nodes`` that are
        #       degree 0, so if ``zero1`` is a constant, ``zero2``
        #       won't be.
        zero1, zero2 = zero2, zero1

    power_basis1 = poly_to_power_basis(zero1[0, :])
    all_roots = roots_in_unit_interval(power_basis1)
    if all_roots.size == 0:
        return None

    # NOTE: We normalize ``power_basis2`` because we want to check for
    #       "zero" values, i.e. f2(s) == 0.
    power_basis2 = normalize_polynomial(poly_to_power_basis(zero2[0, :]))
    near_zero = np.abs(polynomial.polyval(all_roots, power_basis2))
    index = np.argmin(near_zero)
    if near_zero[index] < _ZERO_THRESHOLD:
        return all_roots[index]

    return None
734,914
Get the bounding box for a set of points.

.. note::

   There is also a Fortran implementation of this function, which will
   be used if it can be built.

Args:
    nodes (numpy.ndarray): A set of points.

Returns:
    Tuple[float, float, float, float]: The left, right, bottom and top
    bounds for the box.

def _bbox(nodes):
    left, bottom = np.min(nodes, axis=1)
    right, top = np.max(nodes, axis=1)
    return left, right, bottom, top
734,917
Predicate indicating if a point is within a bounding box.

.. note::

   There is also a Fortran implementation of this function, which will
   be used if it can be built.

Args:
    nodes (numpy.ndarray): A set of points.
    point (numpy.ndarray): A 1D NumPy array representing a point in the
        same dimension as ``nodes``.

Returns:
    bool: Indicating containment.

def _contains_nd(nodes, point):
    min_vals = np.min(nodes, axis=1)
    if not np.all(min_vals <= point):
        return False

    max_vals = np.max(nodes, axis=1)
    if not np.all(point <= max_vals):
        return False

    return True
734,918
Checks if a value is in a sorted list.

Uses the :mod:`bisect` builtin to find the insertion point for ``value``.

Args:
    values (List[int]): Integers sorted in ascending order.
    value (int): Value to check if contained in ``values``.

Returns:
    bool: Indicating if the value is contained.

def in_sorted(values, value):
    index = bisect.bisect_left(values, value)
    if index >= len(values):
        return False

    return values[index] == value
734,922
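Usage is straightforward; ``bisect_left`` finds the leftmost insertion point, so membership is a single comparison:

in_sorted([1, 3, 5, 8], 5)  # True
in_sorted([1, 3, 5, 8], 4)  # False (insertion point 2 holds 5, not 4)
in_sorted([1, 3, 5, 8], 9)  # False (insertion point is past the end)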
Add a buffer of empty space around a plot boundary.

.. note::

   This only uses ``line`` data from the axis. It **could** use ``patch``
   data, but doesn't at this time.

Args:
    ax (matplotlib.artist.Artist): A matplotlib axis.
    padding (Optional[float]): Amount (as a fraction of width and height)
        of padding to add around data. Defaults to ``0.125``.

def add_plot_boundary(ax, padding=0.125):
    nodes = np.asfortranarray(
        np.vstack([line.get_xydata() for line in ax.lines]).T
    )
    left, right, bottom, top = _helpers.bbox(nodes)
    center_x = 0.5 * (right + left)
    delta_x = right - left
    center_y = 0.5 * (top + bottom)
    delta_y = top - bottom

    multiplier = (1.0 + padding) * 0.5
    ax.set_xlim(
        center_x - multiplier * delta_x, center_x + multiplier * delta_x
    )
    ax.set_ylim(
        center_y - multiplier * delta_y, center_y + multiplier * delta_y
    )
734,933
Add a polygonal surface patch to a plot.

Args:
    ax (matplotlib.artist.Artist): A matplotlib axis.
    color (Tuple[float, float, float]): Color as RGB profile.
    pts_per_edge (int): Number of points to use in polygonal
        approximation of edge.
    edges (Tuple[~bezier.curve.Curve, ...]): Curved edges defining a
        boundary.

def add_patch(ax, color, pts_per_edge, *edges):
    from matplotlib import patches
    from matplotlib import path as _path_mod

    s_vals = np.linspace(0.0, 1.0, pts_per_edge)

    # Evaluate points on each edge.
    all_points = []
    for edge in edges:
        points = edge.evaluate_multi(s_vals)
        # We assume the edges overlap and leave out the first point
        # in each.
        all_points.append(points[:, 1:])

    # Add first point as last point (polygon is closed).
    first_edge = all_points[0]
    all_points.append(first_edge[:, [0]])

    # Add boundary first.
    polygon = np.asfortranarray(np.hstack(all_points))
    line, = ax.plot(polygon[0, :], polygon[1, :], color=color)
    # Reset ``color`` in case it was ``None`` and set from color wheel.
    color = line.get_color()

    # ``polygon`` is stored Fortran-contiguous with ``x-y`` points in
    # each column but ``Path()`` wants ``x-y`` points in each row.
    path = _path_mod.Path(polygon.T)
    patch = patches.PathPatch(path, facecolor=color, alpha=0.625)
    ax.add_patch(patch)
734,934
Determine if two line segments meet.

This is a helper for :func:`convex_hull_collide` in the special case
that the two convex hulls are actually just line segments. (Even in this
case, this is only problematic if both segments are on a single line.)

Args:
    line1 (numpy.ndarray): ``2 x 2`` array of start and end nodes.
    line2 (numpy.ndarray): ``2 x 2`` array of start and end nodes.

Returns:
    bool: Indicating if the line segments collide.

def line_line_collide(line1, line2):
    s, t, success = segment_intersection(
        line1[:, 0], line1[:, 1], line2[:, 0], line2[:, 1]
    )
    if success:
        return _helpers.in_interval(s, 0.0, 1.0) and \
            _helpers.in_interval(t, 0.0, 1.0)
    else:
        disjoint, _ = parallel_lines_parameters(
            line1[:, 0], line1[:, 1], line2[:, 0], line2[:, 1]
        )
        return not disjoint
734,940
Determine if the convex hulls of two curves collide.

.. note::

   This is a helper for :func:`from_linearized`.

Args:
    nodes1 (numpy.ndarray): Control points of a first curve.
    nodes2 (numpy.ndarray): Control points of a second curve.

Returns:
    bool: Indicating if the convex hulls collide.

def convex_hull_collide(nodes1, nodes2):
    polygon1 = _helpers.simple_convex_hull(nodes1)
    _, polygon_size1 = polygon1.shape
    polygon2 = _helpers.simple_convex_hull(nodes2)
    _, polygon_size2 = polygon2.shape

    if polygon_size1 == 2 and polygon_size2 == 2:
        return line_line_collide(polygon1, polygon2)
    else:
        return _helpers.polygon_collide(polygon1, polygon2)
734,941
Reduce number of candidate intersection pairs. .. note:: This is a helper for :func:`_all_intersections`. Uses a stricter bounding box intersection predicate by forming the actual convex hull of each candidate curve segment and then checking if those convex hulls collide. Args: candidates (List): An iterable of pairs of curves (or linearized curves). Returns: List: A pruned list of curve pairs.
def prune_candidates(candidates): pruned = [] # NOTE: In the below we replace ``isinstance(a, B)`` with # ``a.__class__ is B``, which is a 3-3.5x speedup. for first, second in candidates: if first.__class__ is Linearization: nodes1 = first.curve.nodes else: nodes1 = first.nodes if second.__class__ is Linearization: nodes2 = second.curve.nodes else: nodes2 = second.nodes if convex_hull_collide(nodes1, nodes2): pruned.append((first, second)) return pruned
734,948
Degree-elevate a curve so two curves have matching degree. Args: nodes1 (numpy.ndarray): Set of control points for a B |eacute| zier curve. nodes2 (numpy.ndarray): Set of control points for a B |eacute| zier curve. Returns: Tuple[numpy.ndarray, numpy.ndarray]: The potentially degree-elevated nodes passed in.
def make_same_degree(nodes1, nodes2): _, num_nodes1 = nodes1.shape _, num_nodes2 = nodes2.shape for _ in six.moves.xrange(num_nodes2 - num_nodes1): nodes1 = _curve_helpers.elevate_nodes(nodes1) for _ in six.moves.xrange(num_nodes1 - num_nodes2): nodes2 = _curve_helpers.elevate_nodes(nodes2) return nodes1, nodes2
734,949
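To illustrate what one round of degree elevation does (``_curve_helpers.elevate_nodes`` is internal, so this standalone sketch re-derives it from the standard elevation formula):

import numpy as np

def elevate_once(nodes):
    # New control points: Q_0 = P_0, Q_{n+1} = P_n, and in between
    # Q_k = (k / (n + 1)) * P_{k-1} + (1 - k / (n + 1)) * P_k.
    dimension, num_nodes = nodes.shape
    new_nodes = np.zeros((dimension, num_nodes + 1), order="F")
    new_nodes[:, 0] = nodes[:, 0]
    new_nodes[:, -1] = nodes[:, -1]
    for k in range(1, num_nodes):
        lam = k / float(num_nodes)
        new_nodes[:, k] = lam * nodes[:, k - 1] + (1.0 - lam) * nodes[:, k]
    return new_nodes

line = np.asfortranarray([[0.0, 2.0], [0.0, 0.0]])
elevate_once(line)
# array([[0., 1., 2.],
#        [0., 0., 0.]])  # same segment, now quadratic control points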
Try to linearize a curve (or an already linearized curve). Args: shape (Union[SubdividedCurve, \ ~bezier._geometric_intersection.Linearization]): A curve or an already linearized curve. Returns: Union[SubdividedCurve, \ ~bezier._geometric_intersection.Linearization]: The (potentially linearized) curve.
def from_shape(cls, shape): # NOTE: In the below we replace ``isinstance(a, B)`` with # ``a.__class__ is B``, which is a 3-3.5x speedup. if shape.__class__ is cls: return shape else: error = linearization_error(shape.nodes) if error < _ERROR_VAL: linearized = cls(shape, error) return linearized else: return shape
734,958
Strip trailing whitespace and clean up "local" names in C source. These source files are autogenerated from the ``cython`` CLI. Args: c_source (str): Path to a ``.c`` source file. virtualenv_dirname (str): The name of the ``virtualenv`` directory where Cython is installed (this is part of a relative path ``.nox/{NAME}/lib/...``).
def clean_file(c_source, virtualenv_dirname): with open(c_source, "r") as file_obj: contents = file_obj.read().rstrip() # Replace the path to the Cython include files. py_version = "python{}.{}".format(*sys.version_info[:2]) lib_path = os.path.join( ".nox", virtualenv_dirname, "lib", py_version, "site-packages", "" ) contents = contents.replace(lib_path, "") # Write the files back, but strip all trailing whitespace. lines = contents.split("\n") with open(c_source, "w") as file_obj: for line in lines: file_obj.write(line.rstrip() + "\n")
734,968
Populates ``binary-extension.rst`` with release-specific data. Args: version (str): The current version.
def populate_native_libraries(version): with open(BINARY_EXT_TEMPLATE, "r") as file_obj: template = file_obj.read() contents = template.format(revision=version) with open(BINARY_EXT_FILE, "w") as file_obj: file_obj.write(contents)
734,975
Populates ``DEVELOPMENT.rst`` with release-specific data. This is needed because ``DEVELOPMENT.rst`` is used in the Sphinx documentation. Args: version (str): The current version.
def populate_development(version): with open(DEVELOPMENT_TEMPLATE, "r") as file_obj: template = file_obj.read() contents = template.format(revision=version, rtd_version=version) with open(DEVELOPMENT_FILE, "w") as file_obj: file_obj.write(contents)
734,976
Make the matrix used to subdivide a curve. .. note:: This is a helper for :func:`_subdivide_nodes`. It does not have a Fortran speedup because it is **only** used by a function which has a Fortran speedup. Args: degree (int): The degree of the curve. Returns: Tuple[numpy.ndarray, numpy.ndarray]: The matrices used to convert the nodes into left and right nodes, respectively.
def make_subdivision_matrices(degree): left = np.zeros((degree + 1, degree + 1), order="F") right = np.zeros((degree + 1, degree + 1), order="F") left[0, 0] = 1.0 right[-1, -1] = 1.0 for col in six.moves.xrange(1, degree + 1): half_prev = 0.5 * left[:col, col - 1] left[:col, col] = half_prev left[1 : col + 1, col] += half_prev # noqa: E203 # Populate the complement col (in right) as well. complement = degree - col # NOTE: We "should" reverse the results when using # the complement, but they are symmetric so # that would be a waste. right[-(col + 1) :, complement] = left[: col + 1, col] # noqa: E203 return left, right
734,980
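A worked example for degree 1 (assuming ``make_subdivision_matrices`` is in scope); multiplying the nodes by the left/right matrices splits the segment at its midpoint:

import numpy as np

left, right = make_subdivision_matrices(1)
# left  == [[1.0, 0.5],    right == [[0.5, 0.0],
#           [0.0, 0.5]]              [0.5, 1.0]]
nodes = np.asfortranarray([[0.0, 4.0], [0.0, 2.0]])  # (0, 0) -> (4, 2)
nodes.dot(left)   # [[0., 2.], [0., 1.]]: left half ends at the midpoint
nodes.dot(right)  # [[2., 4.], [1., 2.]]: right half starts there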
Subdivide a curve into two sub-curves. Does so by taking the unit interval (i.e. the domain of the curve) and splitting it into two sub-intervals by splitting down the middle. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve. Returns: Tuple[numpy.ndarray, numpy.ndarray]: The nodes for the two sub-curves.
def _subdivide_nodes(nodes): _, num_nodes = np.shape(nodes) if num_nodes == 2: left_nodes = _helpers.matrix_product(nodes, _LINEAR_SUBDIVIDE_LEFT) right_nodes = _helpers.matrix_product(nodes, _LINEAR_SUBDIVIDE_RIGHT) elif num_nodes == 3: left_nodes = _helpers.matrix_product(nodes, _QUADRATIC_SUBDIVIDE_LEFT) right_nodes = _helpers.matrix_product( nodes, _QUADRATIC_SUBDIVIDE_RIGHT ) elif num_nodes == 4: left_nodes = _helpers.matrix_product(nodes, _CUBIC_SUBDIVIDE_LEFT) right_nodes = _helpers.matrix_product(nodes, _CUBIC_SUBDIVIDE_RIGHT) else: left_mat, right_mat = make_subdivision_matrices(num_nodes - 1) left_nodes = _helpers.matrix_product(nodes, left_mat) right_nodes = _helpers.matrix_product(nodes, right_mat) return left_nodes, right_nodes
734,981
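A worked example for a quadratic arc (assuming ``_subdivide_nodes`` and its precomputed matrices are in scope); the split point B(0.5) is shared by both halves:

import numpy as np

nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 2.0, 0.0]])
left, right = _subdivide_nodes(nodes)
# left  == [[0.0, 0.5, 1.0],   right == [[1.0, 1.5, 2.0],
#           [0.0, 1.0, 1.0]]             [1.0, 1.0, 0.0]]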
r"""Compute :math:`\|B(s)\|_2`. .. note:: This is a helper for :func:`_compute_length` and does not have a Fortran speedup. Intended to be used with ``functools.partial`` to fill in the value of ``nodes`` and create a callable that only accepts ``s_val``. Args: nodes (numpy.ndarray): The nodes defining a curve. s_val (float): Parameter to compute :math:`B(s)`. Returns: float: The norm of :math:`B(s)`.
def vec_size(nodes, s_val):
    result_vec = evaluate_multi(nodes, np.asfortranarray([s_val]))
    # NOTE: We convert to 1D to make sure NumPy uses vector norm.
    return np.linalg.norm(result_vec[:, 0], ord=2)
734,983
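A sketch of the intended ``functools.partial`` usage (assuming ``vec_size`` and the module's ``evaluate_multi`` are importable):

import functools

import numpy as np

nodes = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])  # line (0, 0) -> (1, 1)
size_func = functools.partial(vec_size, nodes)       # only ``s_val`` remains
size_func(0.5)  # == np.sqrt(0.5), since B(0.5) == (0.5, 0.5)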
Performs degree-reduction for a B |eacute| zier curve. Does so by using the pseudo-inverse of the degree elevation operator (which is overdetermined). .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes in the curve. Returns: numpy.ndarray: The reduced nodes. Raises: .UnsupportedDegree: If the degree is not 1, 2, 3 or 4.
def _reduce_pseudo_inverse(nodes): _, num_nodes = np.shape(nodes) if num_nodes == 2: reduction = _REDUCTION0 denom = _REDUCTION_DENOM0 elif num_nodes == 3: reduction = _REDUCTION1 denom = _REDUCTION_DENOM1 elif num_nodes == 4: reduction = _REDUCTION2 denom = _REDUCTION_DENOM2 elif num_nodes == 5: reduction = _REDUCTION3 denom = _REDUCTION_DENOM3 else: raise _helpers.UnsupportedDegree(num_nodes - 1, supported=(1, 2, 3, 4)) result = _helpers.matrix_product(nodes, reduction) result /= denom return result
734,992
Apply degree reduction to ``nodes`` until it can no longer be reduced. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes in the curve. Returns: numpy.ndarray: The fully degree-reduced nodes.
def _full_reduce(nodes): was_reduced, nodes = maybe_reduce(nodes) while was_reduced: was_reduced, nodes = maybe_reduce(nodes) return nodes
734,995
Patch up ``f90_compiler`` compiler flags. Updates flags in ``gfortran`` and ignores other compilers. The only modification is the removal of ``-fPIC`` since it is not used on Windows and the build flags turn warnings into errors. Args: f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler instance.
def patch_f90_compiler(f90_compiler): # NOTE: NumPy may not be installed, but we don't want **this** module to # cause an import failure. from numpy.distutils.fcompiler import gnu # Only Windows. if os.name != "nt": return # Only ``gfortran``. if not isinstance(f90_compiler, gnu.Gnu95FCompiler): return f90_compiler.compiler_f77[:] = _remove_fpic(f90_compiler.compiler_f77) f90_compiler.compiler_f90[:] = _remove_fpic(f90_compiler.compiler_f90) c_compiler = f90_compiler.c_compiler if c_compiler.compiler_type != "msvc": raise NotImplementedError( "MSVC is the only supported C compiler on Windows." )
735,002
source: https://github.com/openstack/deb-python-oauth2client Generates a 'code_verifier' as described in section 4.1 of RFC 7636. This is a 'high-entropy cryptographic random string' that will be impractical for an attacker to guess. Args: n_bytes: integer between 31 and 96, inclusive. default: 64 number of bytes of entropy to include in verifier. Returns: String of urlsafe base64-encoded random data, with '=' padding stripped.
def generate_code_verifier(n_bytes=64): verifier = base64.urlsafe_b64encode( os.urandom(n_bytes) ).rstrip(b'=').decode('utf-8') # https://tools.ietf.org/html/rfc7636#section-4.1 # minimum length of 43 characters and a maximum length of 128 characters. if len(verifier) < 43: raise ValueError("Verifier too short. n_bytes must be > 30.") elif len(verifier) > 128: raise ValueError("Verifier too long. n_bytes must be < 97.") else: return verifier
735,780
source: https://github.com/openstack/deb-python-oauth2client Creates a 'code_challenge' as described in section 4.2 of RFC 7636 by taking the sha256 hash of the verifier and then urlsafe base64-encoding it. Args: verifier: string, representing a code_verifier as generated by generate_code_verifier(). Returns: String, representing a urlsafe base64-encoded sha256 hash digest, without '=' padding.
def generate_code_challenge(verifier): digest = hashlib.sha256(verifier.encode('utf-8')).digest() return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('utf-8')
735,781
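A PKCE round-trip sketch, assuming both helpers above are in scope; the lengths follow directly from the base64 arithmetic (64 random bytes -> 86 characters, a 32-byte sha256 digest -> 43 characters once padding is stripped):

verifier = generate_code_verifier()           # default n_bytes=64
challenge = generate_code_challenge(verifier)
assert len(verifier) == 86                    # within RFC 7636's 43..128 range
assert len(challenge) == 43                   # fixed for any sha256 digest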
Constructor

Arguments:
    path {String} -- The path on the disk to save the data
    settings {dict} -- The settings values for diskcache
def __init__(self, path, **settings): from diskcache import Cache self._cache = Cache(path, **settings)
735,812
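A usage sketch for the constructor above; ``DiskStorage`` is a hypothetical name for whatever class it belongs to, since the class itself is not shown:

from diskcache import Cache

class DiskStorage:
    def __init__(self, path, **settings):
        self._cache = Cache(path, **settings)

store = DiskStorage("/tmp/mycache", size_limit=2 ** 28)  # cap at 256 MiB
store._cache["greeting"] = "hello"   # persisted to disk
print(store._cache["greeting"])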
Creates gutter clients and memoizes them in a registry for future quick access. Args: alias (str or None): Name of the client. Used for caching. If name is falsy then do not use the cache. cache (dict): cache to store gutter managers in. **kwargs: kwargs to be passed to the Manager class. Returns (Manager): A gutter client.
def get_gutter_client( alias='default', cache=CLIENT_CACHE, **kwargs ): from gutter.client.models import Manager if not alias: return Manager(**kwargs) elif alias not in cache: cache[alias] = Manager(**kwargs) return cache[alias]
735,946
Currently a small stub to create an instance of Checker for the passed ``infile`` and run its test functions through linting. Args: infile (typing.IO[str]): Open file containing the module to be linted. Returns: int: Number of flake8 errors raised.
def do_command_line(infile: typing.IO[str]) -> int: lines = infile.readlines() tree = ast.parse(''.join(lines)) checker = Checker(tree, lines, infile.name) checker.load() errors = [] # type: typing.List[AAAError] for func in checker.all_funcs(skip_noqa=True): try: errors = list(func.check_all()) except ValidationError as error: errors = [error.to_aaa()] print(func.__str__(errors), end='') return len(errors)
736,480
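A hypothetical driver for ``do_command_line`` (assumes the flake8-aaa ``Checker`` machinery is importable); note the passed file must have a ``name`` attribute, so a regular file handle works:

with open("test_example.py") as infile:  # hypothetical test module
    num_errors = do_command_line(infile)
raise SystemExit(1 if num_errors else 0)  # non-zero exit on lint errors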