text
stringlengths
81
112k
def open_url(url, httpuser=None, httppassword=None, method=None):
    """
    Open a URL using an opener that will simulate a browser user-agent

    url: The URL
    httpuser, httppassword: HTTP authentication credentials (either both
      or neither must be provided)
    method: The HTTP method

    Caller is responsible for calling close() on the returned object
    """
    if os.getenv('OMEGO_SSL_NO_VERIFY') == '1':
        # Must be installed first so it overrides the default HTTPS handler
        log.debug('OMEGO_SSL_NO_VERIFY=1')
        try:
            ctx = ssl.create_default_context()
        except Exception as e:
            log.error('Failed to create Default SSL context: %s' % e)
            raise Stop(
                'Failed to create Default SSL context, OMEGO_SSL_NO_VERIFY '
                'is not supported on older versions of Python')
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        opener = urllib2.build_opener(urllib2.HTTPSHandler(context=ctx))
    else:
        opener = urllib2.build_opener()

    ua = os.environ.get('USER_AGENT')
    if ua is not None:
        opener.addheaders = [('User-agent', ua)]
        log.debug('Setting user-agent: %s', ua)

    if httpuser and httppassword:
        mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        mgr.add_password(None, url, httpuser, httppassword)
        log.debug('Enabling HTTP authentication')
        opener.add_handler(urllib2.HTTPBasicAuthHandler(mgr))
        opener.add_handler(urllib2.HTTPDigestAuthHandler(mgr))
    elif httpuser or httppassword:
        raise FileException(
            'httpuser and httppassword must be used together', url)

    # Override the request method, see http://stackoverflow.com/a/4421485
    req = urllib2.Request(url)
    if method:
        req.get_method = lambda: method
    return opener.open(req)
def dereference_url(url):
    """
    Makes a HEAD request to find the final destination of a URL after
    following any redirects
    """
    response = open_url(url, method='HEAD')
    try:
        # The response object exposes the post-redirect URL
        return response.url
    finally:
        response.close()
def read(url, **kwargs):
    """
    Read the contents of a URL into memory, return
    """
    res = open_url(url, **kwargs)
    try:
        data = res.read()
    finally:
        res.close()
    return data
def download(url, filename=None, print_progress=0, delete_fail=True,
             **kwargs):
    """
    Download a file, optionally printing a simple progress bar

    url: The URL to download
    filename: The filename to save to, default is to use the URL basename
    print_progress: The length of the progress bar, use 0 to disable
    delete_fail: If True delete the file if the download was not
      successful, default is to keep the temporary file
    return: The downloaded filename
    """
    blocksize = 1024 * 1024
    downloaded = 0
    progress = None

    log.info('Downloading %s', url)
    response = open_url(url, **kwargs)

    if not filename:
        filename = os.path.basename(url)

    output = None
    try:
        # Raises KeyError if the server did not send Content-Length
        total = int(response.headers['Content-Length'])

        if print_progress:
            progress = ProgressBar(print_progress, total)

        # Write to a temporary file in the destination directory and only
        # rename to the final name once the download completed
        with tempfile.NamedTemporaryFile(
                prefix=filename + '.', dir='.', delete=False) as output:
            while downloaded < total:
                block = response.read(blocksize)
                if not block:
                    # BUGFIX: an empty read means the connection closed
                    # early; previously this looped forever
                    raise FileException(
                        'Download incomplete (%d of %d bytes)' % (
                            downloaded, total), url)
                output.write(block)
                downloaded += len(block)
                if progress:
                    progress.update(downloaded)
        os.rename(output.name, filename)
        # Mark success so the finally block keeps the file
        output = None
        return filename
    finally:
        response.close()
        if delete_fail and output:
            os.unlink(output.name)
def rename_backup(name, suffix='.bak'):
    """
    Append a backup prefix to a file or directory, with an increasing
    numeric suffix (.N) if a file already exists
    """
    candidate = '%s%s' % (name, suffix)
    counter = 0
    # Keep incrementing the numeric suffix until the name is free
    while os.path.exists(candidate):
        counter += 1
        candidate = '%s%s.%d' % (name, suffix, counter)
    log.info('Renaming %s to %s', name, candidate)
    os.rename(name, candidate)
    return candidate
def timestamp_filename(basename, ext=None):
    """
    Return a string of the form [basename-TIMESTAMP.ext]
    where TIMESTAMP is of the form YYYYMMDD-HHMMSS-MILSEC
    """
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S-%f')
    if ext:
        return '%s-%s.%s' % (basename, stamp, ext)
    return '%s-%s' % (basename, stamp)
def check_extracted_paths(namelist, subdir=None):
    """
    Check whether zip file paths are all relative, and optionally in a
    specified subdirectory, raises an exception if not

    namelist: A list of paths from the zip file
    subdir: If specified then check whether all paths in the zip file are
      under this subdirectory

    Python docs are unclear about the security of extract/extractall:
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
    """
    def relpath(p):
        # os.path.relpath strips a trailing sep; re-add it so directory
        # prefixes compare correctly (zips may use unix sep on Windows)
        q = os.path.relpath(p)
        if p.endswith(os.path.sep) or p.endswith('/'):
            q += os.path.sep
        return q

    parent = os.path.abspath('.')
    if subdir:
        if os.path.isabs(subdir):
            raise FileException('subdir must be a relative path', subdir)
        subdir = relpath(subdir + os.path.sep)

    for name in namelist:
        absname = os.path.abspath(name)
        # Any entry escaping the current directory is rejected
        if os.path.commonprefix([parent, absname]) != parent:
            raise FileException('Insecure path in zipfile', name)
        if subdir and os.path.commonprefix(
                [subdir, relpath(name)]) != subdir:
            raise FileException(
                'Path in zipfile is not in required subdir', name)
def unzip(filename, match_dir=False, destdir=None):
    """
    Extract all files from a zip archive

    filename: The path to the zip file
    match_dir: If True all files in the zip must be contained in a
      subdirectory named after the archive file with extension removed
    destdir: Extract the zip into this directory, default current directory

    return: If match_dir is True then returns the subdirectory (including
      destdir), otherwise returns destdir or '.'
    """
    destdir = destdir or '.'
    z = zipfile.ZipFile(filename)

    unzipped = '.'
    if match_dir:
        if not filename.endswith('.zip'):
            raise FileException('Expected .zip file extension', filename)
        unzipped = os.path.basename(filename)[:-4]
        check_extracted_paths(z.namelist(), unzipped)
    else:
        check_extracted_paths(z.namelist())

    # Restore unix file permissions stored in the upper 16 bits, see
    # http://stackoverflow.com/a/6297838 and http://stackoverflow.com/a/3015466
    for info in z.infolist():
        log.debug('Extracting %s to %s', info.filename, destdir)
        z.extract(info, destdir)
        perms = info.external_attr >> 16 & 4095
        if perms > 0:
            os.chmod(os.path.join(destdir, info.filename), perms)
    return os.path.join(destdir, unzipped)
def zip(filename, paths, strip_prefix=''):
    """
    Create a new zip archive containing files

    filename: The name of the zip file to be created
    paths: A list of files or directories
    strip_prefix: Remove this prefix from all file-paths before adding to zip
    """
    if isinstance(paths, basestring):
        paths = [paths]

    # Expand directories into the full set of files they contain
    filelist = set()
    for p in paths:
        if os.path.isfile(p):
            filelist.add(p)
        else:
            for root, dirs, files in os.walk(p):
                filelist.update(os.path.join(root, f) for f in files)

    z = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
    for f in sorted(filelist):
        arcname = f
        if arcname.startswith(strip_prefix):
            arcname = arcname[len(strip_prefix):]
        # Archive entries must always be relative
        if arcname.startswith(os.path.sep):
            arcname = arcname[1:]
        log.debug('Adding %s to %s[%s]', f, filename, arcname)
        z.write(f, arcname)
    z.close()
def get_as_local_path(path, overwrite, progress=0,
                      httpuser=None, httppassword=None):
    """
    Automatically handle local and remote URLs, files and directories

    path: Either a local directory, file or remote URL. If a URL is given
      it will be fetched. If this is a zip it will be automatically
      expanded by default.
    overwrite: Whether to overwrite an existing file:
      'error': Raise an exception
      'backup: Renamed the old file and use the new one
      'keep': Keep the old file, don't overwrite or raise an exception
    progress: Number of progress dots, default 0 (don't print)
    httpuser, httppass: Credentials for HTTP authentication
    return: A tuple (type, localpath)
      type:
        'file': localpath is the path to a local file
        'directory': localpath is the path to a local directory
        'unzipped': localpath is the path to a local unzipped directory
    """
    m = re.match('([A-Za-z]+)://', path)
    if m:
        # url_open handles multiple protocols so don't bother validating
        log.debug('Detected URL protocol: %s', m.group(1))

        # URL should use / as the pathsep
        localpath = path.split('/')[-1]
        if not localpath:
            raise FileException(
                'Remote path appears to be a directory', path)

        fetch = True
        if os.path.exists(localpath):
            if overwrite == 'error':
                raise FileException('File already exists', localpath)
            elif overwrite == 'keep':
                log.info('Keeping existing %s', localpath)
                fetch = False
            elif overwrite == 'backup':
                rename_backup(localpath)
            else:
                raise Exception('Invalid overwrite flag: %s' % overwrite)
        if fetch:
            download(path, localpath, progress, httpuser=httpuser,
                     httppassword=httppassword)
    else:
        localpath = path

    log.debug("Local path: %s", localpath)
    if os.path.isdir(localpath):
        return 'directory', localpath
    if os.path.exists(localpath):
        return 'file', localpath

    # Somethings gone very wrong
    raise Exception('Local path does not exist: %s' % localpath)
def create(fs, channels, application):
    """Allocates and initializes an encoder state.

    fs: sampling rate
    channels: number of channels
    application: intended Opus application constant

    Raises OpusError if the library reports a non-OK result code.
    """
    result_code = ctypes.c_int()

    result = _create(fs, channels, application, ctypes.byref(result_code))
    # BUGFIX: compare by value — 'is not' tests object identity, which is
    # unreliable for ints outside CPython's small-int cache
    if result_code.value != constants.OK:
        raise OpusError(result_code.value)

    return result
def encode(encoder, pcm, frame_size, max_data_bytes):
    """Encodes an Opus frame

    Returns string output payload
    """
    samples = ctypes.cast(pcm, c_int16_pointer)
    buf = (ctypes.c_char * max_data_bytes)()

    nbytes = _encode(encoder, samples, frame_size, buf, max_data_bytes)
    if nbytes < 0:
        # Negative return values are Opus error codes
        raise OpusError(nbytes)

    return array.array('c', buf[:nbytes]).tostring()
def encode_float(encoder, pcm, frame_size, max_data_bytes):
    """Encodes an Opus frame from floating point input"""
    samples = ctypes.cast(pcm, c_float_pointer)
    buf = (ctypes.c_char * max_data_bytes)()

    nbytes = _encode_float(encoder, samples, frame_size, buf, max_data_bytes)
    if nbytes < 0:
        # Negative return values are Opus error codes
        raise OpusError(nbytes)

    return array.array('c', buf[:nbytes]).tostring()
def __parse_tostr(self, text, **kwargs):
    '''Parse the text and return the MeCab output as a single string.

    Supports optional morpheme boundary constraints (via the boundary
    keyword) or feature constraints (via the feature keyword), and the
    N-best behavior when the 'nbest' option is greater than 1.

    Raises MeCabError if the lattice reports an error.
    '''
    n = self.options.get('nbest', 1)

    if self._KW_BOUNDARY in kwargs:
        # Boundary-constraint parsing: tokenize by the given pattern and
        # mark byte positions as token boundaries / interiors
        patt = kwargs.get(self._KW_BOUNDARY, '.')
        tokens = list(self.__split_pattern(text, patt))
        text = ''.join([t[0] for t in tokens])

        btext = self.__str2bytes(text)
        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

        bpos = 0
        self.__mecab.mecab_lattice_set_boundary_constraint(
            self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)

        for (token, match) in tokens:
            bpos += 1
            if match:
                mark = self.MECAB_INSIDE_TOKEN
            else:
                mark = self.MECAB_ANY_BOUNDARY

            # Interior byte positions of this token
            for _ in range(1, len(self.__str2bytes(token))):
                self.__mecab.mecab_lattice_set_boundary_constraint(
                    self.lattice, bpos, mark)
                bpos += 1

            self.__mecab.mecab_lattice_set_boundary_constraint(
                self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)
    elif self._KW_FEATURE in kwargs:
        # Feature-constraint parsing: pin features for matched morphemes
        features = kwargs.get(self._KW_FEATURE, ())
        fd = {morph: self.__str2bytes(feat) for morph, feat in features}

        tokens = self.__split_features(text, [e[0] for e in features])
        text = ''.join([t[0] for t in tokens])

        btext = self.__str2bytes(text)
        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

        bpos = 0
        for chunk, match in tokens:
            c = len(self.__str2bytes(chunk))
            # NOTE(review): '== True' intentionally preserved; a truthy
            # non-bool match value would behave differently with 'if match:'
            if match == True:
                self.__mecab.mecab_lattice_set_feature_constraint(
                    self.lattice, bpos, bpos + c, fd[chunk])
            bpos += c
    else:
        btext = self.__str2bytes(text)
        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

    self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)

    if n > 1:
        res = self.__mecab.mecab_lattice_nbest_tostr(self.lattice, n)
    else:
        res = self.__mecab.mecab_lattice_tostr(self.lattice)

    if res != self.__ffi.NULL:
        raw = self.__ffi.string(res)
        return self.__bytes2str(raw).strip()
    else:
        err = self.__mecab.mecab_lattice_strerror(self.lattice)
        logger.error(self.__bytes2str(self.__ffi.string(err)))
        raise MeCabError(self.__bytes2str(self.__ffi.string(err)))
def __parse_tonodes(self, text, **kwargs):
    '''Parse the text and yield MeCabNode instances.

    Supports optional morpheme boundary constraints (via the boundary
    keyword) or feature constraints (via the feature keyword), and the
    N-best behavior when the 'nbest' option is greater than 1.

    Raises MeCabError if the lattice reports an error or a node cannot
    be formatted.
    '''
    n = self.options.get('nbest', 1)

    try:
        if self._KW_BOUNDARY in kwargs:
            # Boundary-constraint parsing: tokenize by the given pattern
            # and mark byte positions as boundaries / interiors
            patt = kwargs.get(self._KW_BOUNDARY, '.')
            tokens = list(self.__split_pattern(text, patt))
            text = ''.join([t[0] for t in tokens])

            btext = self.__str2bytes(text)
            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

            bpos = 0
            self.__mecab.mecab_lattice_set_boundary_constraint(
                self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)

            for (token, match) in tokens:
                bpos += 1
                if match:
                    mark = self.MECAB_INSIDE_TOKEN
                else:
                    mark = self.MECAB_ANY_BOUNDARY

                for _ in range(1, len(self.__str2bytes(token))):
                    self.__mecab.mecab_lattice_set_boundary_constraint(
                        self.lattice, bpos, mark)
                    bpos += 1

                self.__mecab.mecab_lattice_set_boundary_constraint(
                    self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)
        elif self._KW_FEATURE in kwargs:
            # Feature-constraint parsing: pin features for matched chunks
            features = kwargs.get(self._KW_FEATURE, ())
            fd = {morph: self.__str2bytes(feat) for morph, feat in features}

            tokens = self.__split_features(text, [e[0] for e in features])
            text = ''.join([t[0] for t in tokens])

            btext = self.__str2bytes(text)
            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

            bpos = 0
            for chunk, match in tokens:
                c = len(self.__str2bytes(chunk))
                if match:
                    self.__mecab.mecab_lattice_set_feature_constraint(
                        self.lattice, bpos, bpos + c, fd[chunk])
                bpos += c
        else:
            btext = self.__str2bytes(text)
            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

        self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)

        for _ in range(n):
            check = self.__mecab.mecab_lattice_next(self.lattice)
            if n == 1 or check:
                nptr = self.__mecab.mecab_lattice_get_bos_node(self.lattice)
                while nptr != self.__ffi.NULL:
                    # skip over any BOS nodes, since mecab does
                    if nptr.stat != MeCabNode.BOS_NODE:
                        raws = self.__ffi.string(
                            nptr.surface[0:nptr.length])
                        surf = self.__bytes2str(raws).strip()

                        if 'output_format_type' in self.options or \
                                'node_format' in self.options:
                            sp = self.__mecab.mecab_format_node(
                                self.tagger, nptr)
                            if sp != self.__ffi.NULL:
                                rawf = self.__ffi.string(sp)
                            else:
                                err = self.__mecab.mecab_strerror(
                                    self.tagger)
                                err = self.__bytes2str(
                                    self.__ffi.string(err))
                                msg = self._ERROR_NODEFORMAT.format(
                                    surf, err)
                                raise MeCabError(msg)
                        else:
                            rawf = self.__ffi.string(nptr.feature)
                        feat = self.__bytes2str(rawf).strip()

                        mnode = MeCabNode(nptr, surf, feat)
                        yield mnode
                    nptr = getattr(nptr, 'next')
    except GeneratorExit:
        logger.debug('close invoked on generator')
    except MeCabError:
        raise
    # NOTE(review): bare except intentionally preserved — any other
    # failure is translated into a MeCabError from the lattice
    except:
        err = self.__mecab.mecab_lattice_strerror(self.lattice)
        logger.error(self.__bytes2str(self.__ffi.string(err)))
        raise MeCabError(self.__bytes2str(self.__ffi.string(err)))
def parse(self, text, **kwargs):
    '''Parse the given text and return result from MeCab.

    :param text: the text to parse.
    :type text: str
    :param as_nodes: return generator of MeCabNodes if True;
        or string if False.
    :type as_nodes: bool, defaults to False
    :param boundary_constraints: regular expression for morpheme boundary
        splitting; if non-None and feature_constraints is None, then
        boundary constraint parsing will be used.
    :type boundary_constraints: str or re
    :param feature_constraints: tuple containing tuple instances of
        target morpheme and corresponding feature string in order of
        precedence; if non-None and boundary_constraints is None, then
        feature constraint parsing will be used.
    :type feature_constraints: tuple
    :return: A single string containing the entire MeCab output;
        or a Generator yielding the MeCabNode instances.
    :raises: MeCabError
    '''
    def _fail(msg):
        # Log and raise in one place to keep validation uniform
        logger.error(msg)
        raise MeCabError(msg)

    if text is None:
        _fail(self._ERROR_EMPTY_STR)
    elif not isinstance(text, str):
        _fail(self._ERROR_NOTSTR)
    elif 'partial' in self.options and not text.endswith("\n"):
        _fail(self._ERROR_MISSING_NL)

    if self._KW_BOUNDARY in kwargs:
        val = kwargs[self._KW_BOUNDARY]
        if not isinstance(val, self._REGEXTYPE) and not isinstance(val, str):
            _fail(self._ERROR_BOUNDARY)
    elif self._KW_FEATURE in kwargs:
        val = kwargs[self._KW_FEATURE]
        if not isinstance(val, tuple):
            _fail(self._ERROR_FEATURE)

    if kwargs.get(self._KW_ASNODES, False):
        return self.__parse_tonodes(text, **kwargs)
    else:
        return self.__parse_tostr(text, **kwargs)
def parse(filename, MAX_TERM_COUNT=1000):
    """
    Parse a GO .obo file and return (tagGroups, terms).

    filename: path to the OBO-format file
    MAX_TERM_COUNT: stop after this many terms, None for no limit.
      MAX_TERM_COUNT = 10000  # There are 39,000 terms in the GO!

    tagGroups: set of term IDs that are parents of 'leaf' terms
    terms: dict mapping term ID -> {'name', 'desc', 'parents', 'children'}
    """
    # BUGFIX: terms was never initialised (implicit global); make it local
    terms = {}
    with open(filename, "r") as f:
        termId = None
        name = None
        desc = None
        parents = []
        termCount = 0
        for l in f.readlines():
            if l.startswith("id:"):
                termId = l.strip()[4:]
            if l.startswith("name:"):
                name = l.strip()[6:]
            elif l.startswith("def:"):
                desc = l.strip()[5:]
            elif l.startswith("is_a:"):
                # "is_a: GO:xxxx ! label" -> keep only the ID
                pid = l.strip()[6:].split(" ", 1)[0]
                parents.append(pid)
            if len(l) == 1:  # newline terminates a term stanza
                # save
                if termId is not None and name is not None:
                    terms[termId] = {'name': name,
                                     'desc': desc,
                                     'parents': parents[:],
                                     'children': []}
                termId = None
                name = None
                # BUGFIX: desc previously leaked into the following term
                desc = None
                parents = []
                termCount += 1
                if MAX_TERM_COUNT is not None and \
                        termCount > MAX_TERM_COUNT:
                    break

    count = 0
    for tid, tdict in terms.items():
        count += 1  # purely for display
        for p in tdict['parents']:
            if p in terms.keys():
                terms[p]['children'].append(tid)

    # Get unique term IDs for Tag Groups.
    tagGroups = set()
    for tid, tdict in terms.items():
        # Only create Tags for GO:terms that are 'leafs' of the tree
        if len(tdict['children']) == 0:
            for p in tdict['parents']:
                tagGroups.add(p)
    return tagGroups, terms
def generate(tagGroups, terms):
    """create Tag Groups and Child Tags using data from terms dict"""
    groups = []
    for pid in tagGroups:
        # In testing we may not have complete set
        if pid not in terms.keys():
            continue
        parent = terms[pid]
        children = [dict(name="[%s] %s" % (cid, terms[cid]['name']),
                         desc=terms[cid]['desc'])
                    for cid in parent['children']]
        groups.append(dict(name="[%s] %s" % (pid, parent['name']),
                           desc=parent['desc'],
                           set=children))
    return json.dumps(groups, indent=2)
def _handle_args(self, cmd, args):
    """
    We need to support deprecated behaviour for now which makes this
    quite complicated

    Current behaviour:
    - install: Installs a new server, existing server causes an error
    - install --upgrade: Installs or upgrades a server
    - install --managedb: Automatically initialise or upgrade the db

    Deprecated:
    - install --upgradedb --initdb: Replaced by install --managedb
    - install --upgradedb: upgrade the db, must exist
    - install --initdb: initialise the db
    - upgrade: Upgrades a server, must already exist
    - upgrade --upgradedb: Automatically upgrade the db

    returns:
    - Modified args object, flag to indicate new/existing/auto install
    """
    if cmd == 'install':
        if args.upgrade:
            # Current behaviour: install or upgrade
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    'Deprecated --initdb --upgradedb flags '
                    'are incompatible with --upgrade'))
            newinstall = None
        else:
            # Current behaviour: Server must not exist
            newinstall = True

        if args.managedb:
            # Current behaviour
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    'Deprecated --initdb --upgradedb flags '
                    'are incompatible with --managedb'))
            args.initdb = True
            args.upgradedb = True
        else:
            if args.initdb or args.upgradedb:
                log.warn('--initdb and --upgradedb are deprecated, '
                         'use --managedb')
    elif cmd == 'upgrade':
        # Deprecated behaviour
        log.warn('"omero upgrade" is deprecated, use "omego '
                 'install --upgrade"')
        cmd = 'install'
        args.upgrade = True
        # Deprecated behaviour: Server must exist
        newinstall = False
    else:
        raise Exception('Unexpected command: %s' % cmd)
    return args, newinstall
def get_server_dir(self):
    """
    Either downloads and/or unzips the server if necessary

    return: the directory of the unzipped server
    """
    if not self.args.server:
        if self.args.skipunzip:
            raise Stop(0, 'Unzip disabled, exiting')
        log.info('Downloading server')
        # The downloader automatically symlinks the server, however if
        # we are upgrading we want to delay the symlink swap, so this
        # overrides args.sym
        # TODO: Find a nicer way to do this?
        artifact_args = copy.copy(self.args)
        artifact_args.sym = ''
        artifacts = Artifacts(artifact_args)
        server = artifacts.download('server')
    else:
        progress = 20 if self.args.verbose else 0
        ptype, server = fileutils.get_as_local_path(
            self.args.server, self.args.overwrite, progress=progress,
            httpuser=self.args.httpuser,
            httppassword=self.args.httppassword)
        if ptype == 'file':
            if self.args.skipunzip:
                raise Stop(0, 'Unzip disabled, exiting')
            log.info('Unzipping %s', server)
            server = fileutils.unzip(
                server, match_dir=True, destdir=self.args.unzipdir)

    log.debug('Server directory: %s', server)
    return server
def handle_database(self):
    """
    Handle database initialisation and upgrade, taking into account
    command line arguments
    """
    # TODO: When initdb and upgradedb are dropped we can just test
    # managedb, but for backwards compatibility we need to support
    # initdb without upgradedb and vice-versa
    if not (self.args.initdb or self.args.upgradedb):
        log.warn('OMERO database check disabled')
        return DB_INIT_NEEDED

    db = DbAdmin(self.dir, None, self.args, self.external)
    status = db.check()
    log.debug('OMERO database upgrade status: %s', status)

    if status == DB_INIT_NEEDED:
        if self.args.initdb:
            log.debug('Initialising OMERO database')
            db.init()
        else:
            log.error('OMERO database not found')
            raise Stop(DB_INIT_NEEDED,
                       'Install/Upgrade failed: OMERO database not found')
    elif status == DB_UPGRADE_NEEDED:
        log.warn('OMERO database exists but is out of date')
        if self.args.upgradedb:
            log.debug('Upgrading OMERO database')
            db.upgrade()
        else:
            raise Stop(
                DB_UPGRADE_NEEDED,
                'Pass --managedb or upgrade your OMERO database manually')
    else:
        assert status == DB_UPTODATE
    return status
def run(self, command):
    """
    Runs a command as if from the command-line without the need for using
    popen or subprocess
    """
    if isinstance(command, basestring):
        argv = command.split()
    else:
        argv = list(command)
    self.external.omero_cli(argv)
def bin(self, command):
    """
    Runs the omero command-line client with an array of arguments using
    the old environment
    """
    argv = command.split() if isinstance(command, basestring) else command
    self.external.omero_bin(argv)
def symlink_check_and_set(self):
    """
    The default symlink was changed from OMERO-CURRENT to OMERO.server.
    If `--sym` was not specified and OMERO-CURRENT exists in the current
    directory stop and warn.
    """
    if self.args.sym == '':
        if os.path.exists('OMERO-CURRENT'):
            log.error('Deprecated OMERO-CURRENT found but --sym not set')
            # BUGFIX: corrected typo 'explicity' -> 'explicitly' in the
            # user-facing message
            raise Stop(
                30, 'The default for --sym has changed to OMERO.server '
                'but the current directory contains OMERO-CURRENT. '
                'Either remove OMERO-CURRENT or explicitly pass --sym.')
    if self.args.sym in ('', 'auto'):
        self.args.sym = 'OMERO.server'
def query(request):
    """Query encoder/decoder with a request value

    Returns a callable(func, obj) that performs the CTL query and
    raises OpusError on a non-OK result code.
    """
    def inner(func, obj):
        result_code = func(obj, request)
        # BUGFIX: compare by value — 'is not' tests identity and can
        # wrongly treat equal integers as different objects
        if result_code != constants.OK:
            raise OpusError(result_code)
        return result_code
    return inner
def get(request, result_type):
    """Get CTL value from a encoder/decoder

    Returns a callable(func, obj) that fetches the CTL value of the
    given ctypes result_type and raises OpusError on a non-OK result.
    """
    def inner(func, obj):
        result = result_type()
        result_code = func(obj, request, ctypes.byref(result))
        # BUGFIX: compare by value — 'is not' tests identity and can
        # wrongly treat equal integers as different objects
        if result_code != constants.OK:
            raise OpusError(result_code)
        return result.value
    return inner
def set(request):
    """Set new CTL value to a encoder/decoder

    Returns a callable(func, obj, value) that applies the CTL value and
    raises OpusError on a non-OK result code.
    """
    def inner(func, obj, value):
        result_code = func(obj, request, value)
        # BUGFIX: compare by value — 'is not' tests identity and can
        # wrongly treat equal integers as different objects
        if result_code != constants.OK:
            raise OpusError(result_code)
    return inner
def sort_schemas(schemas):
    """Sort a list of SQL schemas in order"""
    def _key(schema):
        g = SQL_SCHEMA_REGEXP.match(schema).groups()
        # group 3: 'DEV' should come before '', hence the 'zzz' sentinel
        return (int(g[0]), g[1], int(g[2]) if g[2] else None,
                g[3] if g[3] else 'zzz', int(g[4]))
    return sorted(schemas, key=_key)
def parse_schema_files(files):
    """
    Parse a list of SQL files and return a dictionary of valid schema
    files where each key is a valid schema file and the corresponding
    value is a tuple containing the source and the target schema.
    """
    result = {}
    for path in files:
        root, ext = os.path.splitext(path)
        if ext != ".sql":
            continue
        # Expected layout: .../<target-schema>/<source-schema>.sql
        head, vfrom = os.path.split(root)
        vto = os.path.split(head)[1]
        if is_schema(vto) and is_schema(vfrom):
            result[path] = (vfrom, vto)
    return result
def dump(self):
    """
    Dump the database using the postgres custom format
    """
    dumpfile = self.args.dumpfile
    if not dumpfile:
        # Derive a timestamped default name from the database name
        db, env = self.get_db_args_env()
        dumpfile = fileutils.timestamp_filename(
            'omero-database-%s' % db['name'], 'pgdump')

    log.info('Dumping database to %s', dumpfile)
    if not self.args.dry_run:
        self.pgdump('-Fc', '-f', dumpfile)
def get_db_args_env(self):
    """
    Get a dictionary of database connection parameters, and create an
    environment for running postgres commands.
    Falls back to omego defaults.
    """
    db = {
        'name': self.args.dbname,
        'host': self.args.dbhost,
        'user': self.args.dbuser,
        'pass': self.args.dbpass
    }

    if not self.args.no_db_config:
        try:
            cfg = self.external.get_config(force=True)
        except Exception as e:
            log.warn('config.xml not found: %s', e)
            cfg = {}

        # Prefer values from config.xml, fall back to the defaults above
        for k in db:
            try:
                db[k] = cfg['omero.db.%s' % k]
            except KeyError:
                log.info(
                    'Failed to lookup parameter omero.db.%s, using %s',
                    k, db[k])

    if not db['name']:
        raise Exception('Database name required')

    env = os.environ.copy()
    env['PGPASSWORD'] = db['pass']
    return db, env
def psql(self, *psqlargs):
    """
    Run a psql command
    """
    db, env = self.get_db_args_env()

    args = ['-v', 'ON_ERROR_STOP=on',
            '-d', db['name'],
            '-h', db['host'],
            '-U', db['user'],
            '-w', '-A', '-t'] + list(psqlargs)
    stdout, stderr = External.run('psql', args, capturestd=True, env=env)
    if stderr:
        log.warn('stderr: %s', stderr)
    log.debug('stdout: %s', stdout)
    return stdout
def pgdump(self, *pgdumpargs):
    """
    Run a pg_dump command

    :param pgdumpargs: extra arguments appended to the pg_dump command
    :return: captured stdout of the pg_dump invocation
    """
    db, env = self.get_db_args_env()

    # -w: never prompt for a password (PGPASSWORD comes from env)
    args = ['-d', db['name'], '-h', db['host'], '-U', db['user'],
            '-w'] + list(pgdumpargs)
    stdout, stderr = External.run(
        'pg_dump', args, capturestd=True, env=env)
    if stderr:
        log.warn('stderr: %s', stderr)
    log.debug('stdout: %s', stdout)
    return stdout
def set_server_dir(self, dir):
    """
    Set the directory of the server to be controlled

    :param dir: path to the OMERO server installation
    """
    self.dir = os.path.abspath(dir)
    # A server is considered configured once etc/grid/config.xml exists
    config = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
    self.configured = os.path.exists(config)
def get_config(self, force=False):
    """
    Returns a dictionary of all config.xml properties

    If `force = True` then ignore any cached state and read config.xml
    if possible

    setup_omero_cli() must be called before this method to import the
    correct omero module to minimise the possibility of version conflicts

    :raises Exception: if no config file is present
    """
    if not force and not self.has_config():
        raise Exception('No config file')
    configxml = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
    if not os.path.exists(configxml):
        raise Exception('No config file')

    try:
        # Attempt to open config.xml read-only, though this flag is not
        # present in early versions of OMERO 5.0
        c = self._omero.config.ConfigXml(
            configxml, exclusive=False, read_only=True)
    except TypeError:
        # Older ConfigXml has no read_only parameter
        c = self._omero.config.ConfigXml(configxml, exclusive=False)
    try:
        return c.as_map()
    finally:
        # Always release the config file handle/lock
        c.close()
def setup_omero_cli(self):
    """
    Imports the omero CLI module so that commands can be run directly.
    Note Python does not allow a module to be imported multiple times,
    so this will only work with a single omero instance.

    This can have several surprising effects, so setup_omero_cli() must
    be explcitly called.

    :raises Exception: if no server dir is set, omero.cli was already
        imported, or the server's lib/python directory is missing
    """
    if not self.dir:
        raise Exception('No server directory set')

    if 'omero.cli' in sys.modules:
        raise Exception('omero.cli can only be imported once')

    log.debug("Setting up omero CLI")
    lib = os.path.join(self.dir, "lib", "python")
    if not os.path.exists(lib):
        raise Exception("%s does not exist!" % lib)
    # Prepend so this server's omero package wins over any other
    sys.path.insert(0, lib)

    import omero
    import omero.cli

    log.debug("Using omero CLI from %s", omero.cli.__file__)

    self.cli = omero.cli.CLI()
    self.cli.loadplugins()
    # Keep a handle on the imported module for later use (get_config)
    self._omero = omero
def setup_previous_omero_env(self, olddir, savevarsfile):
    """
    Create a copy of the current environment for interacting with the
    current OMERO server installation

    :param olddir: path to the previous OMERO installation
    :param savevarsfile: file with saved environment variables
    """
    env = self.get_environment(savevarsfile)

    def addpath(varname, p):
        # Prepend p to the path-style environment variable varname
        if not os.path.exists(p):
            raise Exception("%s does not exist!" % p)
        current = env.get(varname)
        if current:
            env[varname] = p + os.pathsep + current
        else:
            env[varname] = p

    olddir = os.path.abspath(olddir)
    lib = os.path.join(olddir, "lib", "python")
    addpath("PYTHONPATH", lib)
    bin = os.path.join(olddir, "bin")
    addpath("PATH", bin)
    self.old_env = env
def omero_cli(self, command):
    """
    Runs a command as if from the OMERO command-line without the need
    for using popen or subprocess.

    :param command: list of CLI arguments, e.g. ['admin', 'start']
    """
    assert isinstance(command, list)
    if not self.cli:
        raise Exception('omero.cli not initialised')
    log.info("Invoking CLI [current environment]: %s", " ".join(command))
    # strict=True makes the CLI raise on a non-zero return code
    self.cli.invoke(command, strict=True)
def omero_bin(self, command):
    """
    Runs the omero command-line client with an array of arguments using
    the old environment

    :param command: list of CLI arguments, e.g. ['admin', 'stop']
    """
    assert isinstance(command, list)
    if not self.old_env:
        raise Exception('Old environment not initialised')
    log.info("Running [old environment]: %s", " ".join(command))
    # Captured stdout/stderr from run() are intentionally discarded;
    # run() raises on a non-zero exit code.
    self.run('omero', command, capturestd=True, env=self.old_env)
def run(exe, args, capturestd=False, env=None):
    """
    Runs an executable with an array of arguments, optionally in the
    specified environment.
    Returns stdout and stderr

    :param exe: executable name or path
    :param args: list of command-line arguments
    :param capturestd: capture stdout/stderr to temp files and return them
    :param env: optional environment dict; None inherits the current one
    :return: (stdout, stderr) bytes, or (None, None) if not captured
    :raises RunException: on a non-zero return code
    """
    command = [exe] + args
    if env:
        log.info("Executing [custom environment]: %s", " ".join(command))
    else:
        log.info("Executing : %s", " ".join(command))
    start = time.time()

    # Temp files will be automatically deleted on close()
    # If run() throws the garbage collector should call close(), so don't
    # bother with try-finally
    outfile = None
    errfile = None
    if capturestd:
        outfile = tempfile.TemporaryFile()
        errfile = tempfile.TemporaryFile()

    # Use call instead of Popen so that stdin is connected to the console,
    # in case user input is required
    # On Windows shell=True is needed otherwise the modified environment
    # PATH variable is ignored. On Unix this breaks things.
    r = subprocess.call(
        command, env=env, stdout=outfile, stderr=errfile, shell=WINDOWS)

    stdout = None
    stderr = None
    if capturestd:
        outfile.seek(0)
        stdout = outfile.read()
        outfile.close()
        errfile.seek(0)
        stderr = errfile.read()
        errfile.close()

    end = time.time()
    if r != 0:
        log.error("Failed [%.3f s]", end - start)
        raise RunException(
            "Non-zero return code", exe, args, r, stdout, stderr)
    log.info("Completed [%.3f s]", end - start)
    return stdout, stderr
def string_support(py3enc):
    '''Create byte-to-string and string-to-byte conversion functions for
    internal use.

    On Python 2 both functions are the identity (str is bytes); on
    Python 3 they decode/encode using the given encoding.

    :param py3enc: Encoding used by Python 3 environment.
    :type py3enc: str
    :return: tuple (bytes2str, str2bytes)
    '''
    # Fixed: compare sys.version_info rather than the lexicographic
    # string comparison `sys.version < '3'`, which breaks for any
    # multi-digit major version and is fragile in general.
    if sys.version_info[0] < 3:
        def bytes2str(b):
            '''Identity, returns the argument string (bytes).'''
            return b

        def str2bytes(s):
            '''Identity, returns the argument string (bytes).'''
            return s
    else:
        def bytes2str(b):
            '''Transforms bytes into string (Unicode).'''
            return b.decode(py3enc)

        def str2bytes(u):
            '''Transforms Unicode into string (bytes).'''
            return u.encode(py3enc)

    return (bytes2str, str2bytes)
def splitter_support(py2enc):
    '''Create tokenizer for use in boundary constraint parsing.

    :param py2enc: Encoding used by Python 2 environment.
    :type py2enc: str
    :return: tuple (_fn_tokenize_pattern, _fn_tokenize_features)
    '''
    # NOTE(review): this guard compares sys.version lexicographically;
    # it works for CPython 2/3 version strings but is fragile in general.
    if sys.version < '3':
        # Python 2: decode to unicode only when the pattern is a
        # Unicode-flagged compiled regex, and re-encode tokens after.
        def _fn_sentence(pattern, sentence):
            if REGEXTYPE == type(pattern):
                if pattern.flags & re.UNICODE:
                    return sentence.decode(py2enc)
                else:
                    return sentence
            else:
                return sentence

        def _fn_token2str(pattern):
            if REGEXTYPE == type(pattern):
                if pattern.flags & re.UNICODE:
                    def _fn(token):
                        return token.encode(py2enc)
                else:
                    def _fn(token):
                        return token
            else:
                def _fn(token):
                    return token
            return _fn
    else:
        # Python 3: everything is already str, no conversion needed.
        def _fn_sentence(pattern, sentence):
            return sentence

        def _fn_token2str(pattern):
            def _fn(token):
                return token
            return _fn

    def _fn_tokenize_pattern(text, pattern):
        # Yield (token, matched) pairs: matched is True for spans that
        # matched the pattern, False for the text in between.
        pos = 0
        sentence = _fn_sentence(pattern, text)
        postprocess = _fn_token2str(pattern)
        for m in re.finditer(pattern, sentence):
            if pos < m.start():
                token = postprocess(sentence[pos:m.start()])
                yield (token.strip(), False)
                pos = m.start()
            token = postprocess(sentence[pos:m.end()])
            yield (token.strip(), True)
            pos = m.end()
        if pos < len(sentence):
            token = postprocess(sentence[pos:])
            yield (token.strip(), False)

    def _fn_tokenize_features(text, features):
        # Repeatedly split any not-yet-matched fragment on each feature
        # pattern, splicing the sub-tokens back in place.
        acc = []
        acc.append((text.strip(), False))
        for feat in features:
            for i,e in enumerate(acc):
                if e[1]==False:
                    tmp = list(_fn_tokenize_pattern(e[0], feat))
                    if len(tmp) > 0:
                        acc.pop(i)
                        acc[i:i] = tmp
        return acc

    return _fn_tokenize_pattern, _fn_tokenize_features
def update(self, document_id, update_spec, namespace, timestamp):
    """Apply updates given in update_spec to the document whose id
    matches that of doc.
    """
    index, doc_type = self._index_and_mapping(namespace)
    with self.lock:
        # Check if document source is stored in local buffer
        document = self.BulkBuffer.get_from_sources(index,
                                                    doc_type,
                                                    u(document_id))
    if document:
        # Document source collected from local buffer
        # Perform apply_update on it and then it will be
        # ready for commiting to Elasticsearch
        updated = self.apply_update(document, update_spec)
        # _id is immutable in MongoDB, so won't have changed in update
        updated['_id'] = document_id
        self.upsert(updated, namespace, timestamp)
    else:
        # Document source needs to be retrieved from Elasticsearch
        # before performing update. Pass update_spec to upsert function
        updated = {"_id": document_id}
        self.upsert(updated, namespace, timestamp, update_spec)
    # upsert() strips metadata, so only _id + fields in _source still here
    return updated
def upsert(self, doc, namespace, timestamp, update_spec=None):
    """Insert a document into Elasticsearch.

    Queues both the source document and a companion metadata document
    (namespace + timestamp) for indexing.
    """
    index, doc_type = self._index_and_mapping(namespace)
    # No need to duplicate '_id' in source document
    doc_id = u(doc.pop("_id"))
    metadata = {
        'ns': namespace,
        '_ts': timestamp
    }

    # Index the source document, using lowercase namespace as index name.
    action = {
        '_op_type': 'index',
        '_index': index,
        '_type': doc_type,
        '_id': doc_id,
        '_source': self._formatter.format_document(doc)
    }
    # Index document metadata with original namespace (mixed upper/lower).
    meta_action = {
        '_op_type': 'index',
        '_index': self.meta_index_name,
        '_type': self.meta_type,
        '_id': doc_id,
        '_source': bson.json_util.dumps(metadata)
    }

    self.index(action, meta_action, doc, update_spec)

    # Leave _id, since it's part of the original document
    doc['_id'] = doc_id
def bulk_upsert(self, docs, namespace, timestamp):
    """Insert multiple documents into Elasticsearch.

    Streams paired (document, metadata) actions to the bulk API.
    """
    def docs_to_upsert():
        doc = None
        for doc in docs:
            # Remove metadata and redundant _id
            # NOTE(review): _index_and_mapping is loop-invariant but
            # recomputed per document.
            index, doc_type = self._index_and_mapping(namespace)
            doc_id = u(doc.pop("_id"))
            document_action = {
                '_index': index,
                '_type': doc_type,
                '_id': doc_id,
                '_source': self._formatter.format_document(doc)
            }
            document_meta = {
                '_index': self.meta_index_name,
                '_type': self.meta_type,
                '_id': doc_id,
                '_source': {
                    'ns': namespace,
                    '_ts': timestamp
                }
            }
            yield document_action
            yield document_meta
        if doc is None:
            raise errors.EmptyDocsError(
                "Cannot upsert an empty sequence of "
                "documents into Elastic Search")
    try:
        kw = {}
        if self.chunk_size > 0:
            kw['chunk_size'] = self.chunk_size

        responses = streaming_bulk(client=self.elastic,
                                   actions=docs_to_upsert(),
                                   **kw)

        for ok, resp in responses:
            if not ok:
                LOG.error(
                    "Could not bulk-upsert document "
                    "into ElasticSearch: %r" % resp)
        if self.auto_commit_interval == 0:
            self.commit()
    except errors.EmptyDocsError:
        # This can happen when mongo-connector starts up, there is no
        # config file, but nothing to dump
        pass
def remove(self, document_id, namespace, timestamp):
    """Remove a document from Elasticsearch.

    Queues delete actions for both the source document and its
    companion metadata document.
    """
    index, doc_type = self._index_and_mapping(namespace)

    action = {
        '_op_type': 'delete',
        '_index': index,
        '_type': doc_type,
        '_id': u(document_id)
    }

    meta_action = {
        '_op_type': 'delete',
        '_index': self.meta_index_name,
        '_type': self.meta_type,
        '_id': u(document_id)
    }

    self.index(action, meta_action)
def send_buffered_operations(self):
    """Send buffered operations to Elasticsearch.

    This method is periodically called by the AutoCommitThread.
    """
    with self.lock:
        try:
            action_buffer = self.BulkBuffer.get_buffer()
            if action_buffer:
                # NOTE: the local 'errors' shadows the module-level
                # errors import within this scope.
                successes, errors = bulk(self.elastic, action_buffer)
                LOG.debug("Bulk request finished, successfully sent %d "
                          "operations", successes)
                if errors:
                    LOG.error(
                        "Bulk request finished with errors: %r", errors)
        except es_exceptions.ElasticsearchException:
            LOG.exception("Bulk request failed with exception")
def get_last_doc(self):
    """Get the most recently modified document from Elasticsearch.

    This method is used to help define a time window within which
    documents may be in conflict after a MongoDB rollback.

    Returns the metadata source dict (with '_id' added), or None when
    no documents exist yet.
    """
    try:
        result = self.elastic.search(
            index=self.meta_index_name,
            body={
                "query": {"match_all": {}},
                "sort": [{"_ts": "desc"}],
            },
            size=1
        )["hits"]["hits"]
        # size=1: the loop yields at most one hit
        for r in result:
            r['_source']['_id'] = r['_id']
            return r['_source']
    except es_exceptions.RequestError:
        # no documents so ES returns 400 because of undefined _ts mapping
        return None
def split_sig(params):
    """
    Split a parameter/type list on commas while respecting nested
    brackets. For example:
       String arg0, int arg2 = 1, List<int> arg3 = [1, 2, 3]
       => ['String arg0', 'int arg2 = 1', 'List<int> arg3 = [1, 2, 3]']
    """
    pieces = []
    buf = ''
    depth = 0
    for ch in params:
        # Track nesting so commas inside <>, {} or [] are not split on.
        if ch in ('<', '{', '['):
            depth += 1
        elif ch in ('>', '}', ']'):
            depth -= 1
        if ch == ',' and depth == 0:
            pieces.append(buf)
            buf = ''
        else:
            buf += ch
    # Trailing segment, unless it is only whitespace.
    if buf.strip() != '':
        pieces.append(buf)
    return pieces
def parse_method_signature(sig):
    """ Parse a method signature of the form:
        modifier* type name (params)

    :return: tuple (modifiers, return_type, name, generic_types, params)
    :raises RuntimeError: if the signature does not match METH_SIG_RE
    """
    match = METH_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Method signature invalid: ' + sig)

    modifiers, return_type, name, generic_types, params = match.groups()
    # Parse the parameter list, if there is one
    if params.strip() != '':
        params = split_sig(params)
        params = [parse_param_signature(x) for x in params]
    else:
        params = []

    return (modifiers.split(), return_type, name, generic_types, params)
def parse_property_signature(sig):
    """ Parse a property signature of the form:
        modifier* type name { (get;)? (set;)? }

    :return: tuple (modifiers, type, name, has_getter, has_setter)
    :raises RuntimeError: if the signature does not match PROP_SIG_RE
    """
    match = PROP_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Property signature invalid: ' + sig)
    groups = match.groups()
    # The leading groups (if any) hold modifiers; the last four are
    # always type, name, getter, setter.
    if groups[0] is not None:
        modifiers = [x.strip() for x in groups[:-4]]
        groups = groups[-4:]
    else:
        modifiers = []
        groups = groups[1:]
    typ, name, getter, setter = groups
    return (modifiers, typ, name, getter is not None, setter is not None)
def parse_indexer_signature(sig):
    """ Parse a indexer signature of the form:
        modifier* type this[params] { (get;)? (set;)? }

    :return: tuple (modifiers, return_type, params, has_getter,
        has_setter)
    :raises RuntimeError: if the signature does not match IDXR_SIG_RE
    """
    match = IDXR_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Indexer signature invalid: ' + sig)

    modifiers, return_type, params, getter, setter = match.groups()
    params = split_sig(params)
    params = [parse_param_signature(x) for x in params]

    return (modifiers.split(), return_type, params,
            getter is not None, setter is not None)
def parse_param_signature(sig):
    """ Parse a parameter signature of the form:
        type name (= default)?

    :return: ParamTuple(name, typ, default, modifiers)
    :raises RuntimeError: if the signature does not match PARAM_SIG_RE
    """
    match = PARAM_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Parameter signature invalid, got ' + sig)
    groups = match.groups()
    modifiers = groups[0].split()
    # Last four groups are: type, name, '=' separator, default value
    typ, name, _, default = groups[-4:]
    return ParamTuple(name=name, typ=typ,
                      default=default, modifiers=modifiers)
def parse_type_signature(sig):
    """ Parse a type signature

    :return: tuple (type, generic_types, is_array)
    :raises RuntimeError: if the signature does not match TYPE_SIG_RE
    """
    match = TYPE_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Type signature invalid, got ' + sig)
    groups = match.groups()
    typ = groups[0]
    generic_types = groups[1]
    if not generic_types:
        generic_types = []
    else:
        # Drop the surrounding angle brackets before splitting
        generic_types = split_sig(generic_types[1:-1])
    is_array = (groups[2] is not None)
    return typ, generic_types, is_array
def parse_attr_signature(sig):
    """ Parse an attribute signature

    :return: tuple (name, params)
    :raises RuntimeError: if the signature does not match ATTR_SIG_RE
    """
    match = ATTR_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Attribute signature invalid, got ' + sig)
    name, _, params = match.groups()
    # Attributes may appear with no argument list at all
    if params is not None and params.strip() != '':
        params = split_sig(params)
        params = [parse_param_signature(x) for x in params]
    else:
        params = []
    return (name, params)
def get_msdn_ref(name):
    """ Try and create a reference to a type on MSDN

    :return: a docutils reference node, or None when the type is not
        recognised as an MSDN/System type
    """
    in_msdn = False
    # Map C# aliases (e.g. 'int') to their System.* equivalents
    if name in MSDN_VALUE_TYPES:
        name = MSDN_VALUE_TYPES[name]
        in_msdn = True
    if name.startswith('System.'):
        in_msdn = True

    if in_msdn:
        # Strip generic arguments before building the library link
        link = name.split('<')[0]
        if link in MSDN_LINK_MAP:
            link = MSDN_LINK_MAP[link]
        else:
            link = link.lower()
        url = 'https://msdn.microsoft.com/en-us/library/'+link+'.aspx'
        node = nodes.reference(name, shorten_type(name))
        node['refuri'] = url
        node['reftitle'] = name
        return node
    else:
        return None
def shorten_type(typ):
    """ Shorten a type. E.g. drops 'System.' """
    # Find the longest configured prefix that matches, then cut it off.
    matched = [len(p) for p in SHORTEN_TYPE_PREFIXES if typ.startswith(p)]
    cut = max(matched) if matched else 0
    return typ[cut:]
def parse_mecab_options(self, options):
    '''Parses the MeCab options, returning them in a dictionary.

    Lattice-level option has been deprecated; please use marginal or
    nbest instead.

    :options string or dictionary of options to use when instantiating
        the MeCab instance. May be in short- or long-form, or in a
        Python dictionary.

    Returns:
        A dictionary of the specified MeCab options, where the keys are
        snake-cased names of the long-form of the option names.

    Raises:
        MeCabError: An invalid value for N-best was passed in.
    '''
    class MeCabArgumentParser(argparse.ArgumentParser):
        '''MeCab option parser for natto-py.'''

        def error(self, message):
            '''error(message: string)

            Raises ValueError instead of exiting the interpreter.
            '''
            raise ValueError(message)

    options = options or {}
    dopts = {}

    if type(options) is dict:
        for name in iter(list(self._SUPPORTED_OPTS.values())):
            if name in options:
                # Fixed: use == instead of 'is' for the empty-string
                # check; identity comparison against a literal relies on
                # interning and raises SyntaxWarning on CPython >= 3.8.
                if options[name] or options[name] == '':
                    val = options[name]
                    if isinstance(val, bytes):
                        val = self.__bytes2str(options[name])
                    dopts[name] = val
    else:
        p = MeCabArgumentParser()
        p.add_argument('-r', '--rcfile',
                       help='use FILE as a resource file',
                       action='store', dest='rcfile')
        p.add_argument('-d', '--dicdir',
                       help='set DIR as a system dicdir',
                       action='store', dest='dicdir')
        p.add_argument('-u', '--userdic',
                       help='use FILE as a user dictionary',
                       action='store', dest='userdic')
        p.add_argument('-l', '--lattice-level',
                       help='lattice information level (DEPRECATED)',
                       action='store', dest='lattice_level', type=int)
        p.add_argument('-O', '--output-format-type',
                       help='set output format type (wakati, none,...)',
                       action='store', dest='output_format_type')
        p.add_argument('-a', '--all-morphs',
                       help='output all morphs (default false)',
                       action='store_true', default=False)
        p.add_argument('-N', '--nbest',
                       help='output N best results (default 1)',
                       action='store', dest='nbest', type=int)
        p.add_argument('-p', '--partial',
                       help='partial parsing mode (default false)',
                       action='store_true', default=False)
        p.add_argument('-m', '--marginal',
                       help='output marginal probability (default false)',
                       action='store_true', default=False)
        p.add_argument('-M', '--max-grouping-size',
                       help=('maximum grouping size for unknown words '
                             '(default 24)'),
                       action='store', dest='max_grouping_size', type=int)
        p.add_argument('-F', '--node-format',
                       help='use STR as the user-defined node format',
                       action='store', dest='node_format')
        p.add_argument('-U', '--unk-format',
                       help=('use STR as the user-defined unknown '
                             'node format'),
                       action='store', dest='unk_format')
        p.add_argument('-B', '--bos-format',
                       help=('use STR as the user-defined '
                             'beginning-of-sentence format'),
                       action='store', dest='bos_format')
        p.add_argument('-E', '--eos-format',
                       help=('use STR as the user-defined '
                             'end-of-sentence format'),
                       action='store', dest='eos_format')
        p.add_argument('-S', '--eon-format',
                       help=('use STR as the user-defined end-of-NBest '
                             'format'),
                       action='store', dest='eon_format')
        p.add_argument('-x', '--unk-feature',
                       help='use STR as the feature for unknown word',
                       action='store', dest='unk_feature')
        p.add_argument('-b', '--input-buffer-size',
                       help='set input buffer size (default 8192)',
                       action='store', dest='input_buffer_size', type=int)
        p.add_argument('-C', '--allocate-sentence',
                       help='allocate new memory for input sentence',
                       action='store_true', dest='allocate_sentence',
                       default=False)
        p.add_argument('-t', '--theta',
                       help=('set temperature parameter theta '
                             '(default 0.75)'),
                       action='store', dest='theta', type=float)
        p.add_argument('-c', '--cost-factor',
                       help='set cost factor (default 700)',
                       action='store', dest='cost_factor', type=int)

        opts = p.parse_args([o.replace('\"', '').replace('\'', '')
                             for o in options.split()])

        for name in iter(list(self._SUPPORTED_OPTS.values())):
            if hasattr(opts, name):
                v = getattr(opts, name)
                # Fixed: == instead of 'is' (see above).
                if v or v == '':
                    dopts[name] = v

    # final checks
    if 'nbest' in dopts \
            and (dopts['nbest'] < 1 or dopts['nbest'] > self._NBEST_MAX):
        logger.error(self._ERROR_NVALUE)
        raise ValueError(self._ERROR_NVALUE)

    # warning for lattice-level deprecation
    if 'lattice_level' in dopts:
        # logger.warning: .warn is a deprecated alias of .warning
        logger.warning('WARNING: {}\n'.format(self._WARN_LATTICE_LEVEL))

    return dopts
def build_options_str(self, options):
    '''Returns a string concatenation of the MeCab options.

    Args:
        options: dictionary of options to use when instantiating the
            MeCab instance.

    Returns:
        A string concatenation of the options used when instantiating
        the MeCab instance, in long-form.
    '''
    opts = []
    for name in iter(list(self._SUPPORTED_OPTS.values())):
        if name in options:
            # Long-form flags use dashes, the dict keys use underscores
            key = name.replace('_', '-')
            if key in self._BOOLEAN_OPTIONS:
                # Boolean flags are emitted bare, and only when truthy
                if options[name]:
                    opts.append('--{}'.format(key))
            else:
                opts.append('--{}={}'.format(key, options[name]))

    return self.__str2bytes(' '.join(opts))
def create(fs, channels):
    """Allocates and initializes a decoder state

    :param fs: sampling rate of input signal (Hz)
    :param channels: number of channels in input signal
    :raises OpusError: if the library reports a non-zero error code
    """
    result_code = ctypes.c_int()

    result = _create(fs, channels, ctypes.byref(result_code))
    # Fixed: compare by value with !=, not 'is not'; identity comparison
    # against an int literal only works via CPython's small-int caching.
    if result_code.value != 0:
        raise OpusError(result_code.value)

    return result
def packet_get_bandwidth(data):
    """Gets the bandwidth of an Opus packet.

    :param data: Opus packet bytes
    :raises OpusError: if the library returns a negative error code
    """
    data_pointer = ctypes.c_char_p(data)

    result = _packet_get_bandwidth(data_pointer)
    # Negative return values are libopus error codes
    if result < 0:
        raise OpusError(result)

    return result
def packet_get_nb_channels(data):
    """Gets the number of channels from an Opus packet

    :param data: Opus packet bytes
    :raises OpusError: if the library returns a negative error code
    """
    data_pointer = ctypes.c_char_p(data)

    result = _packet_get_nb_channels(data_pointer)
    # Negative return values are libopus error codes
    if result < 0:
        raise OpusError(result)

    return result
def packet_get_nb_frames(data, length=None):
    """Gets the number of frames in an Opus packet

    :param data: Opus packet bytes
    :param length: packet length in bytes; defaults to len(data)
    :raises OpusError: if the library returns a negative error code
    """
    data_pointer = ctypes.c_char_p(data)
    if length is None:
        length = len(data)

    result = _packet_get_nb_frames(data_pointer, ctypes.c_int(length))
    # Negative return values are libopus error codes
    if result < 0:
        raise OpusError(result)

    return result
def packet_get_samples_per_frame(data, fs):
    """Gets the number of samples per frame from an Opus packet

    :param data: Opus packet bytes
    :param fs: sampling rate (Hz)
    :raises OpusError: if the library returns a negative error code
    """
    data_pointer = ctypes.c_char_p(data)

    # Bug fix: the original called _packet_get_nb_frames here, which
    # returns the number of frames, not the samples per frame.
    result = _packet_get_samples_per_frame(data_pointer, ctypes.c_int(fs))
    # Negative return values are libopus error codes
    if result < 0:
        raise OpusError(result)

    return result
def decode(decoder, data, length, frame_size, decode_fec, channels=2):
    """Decode an Opus frame

    Unlike the `opus_decode` function , this function takes an
    additional parameter `channels`, which indicates the number of
    channels in the frame

    :raises OpusError: if the library returns a negative error code
    """
    # NOTE(review): this allocates frame_size * channels * 2 c_int16
    # elements, i.e. twice the bytes actually needed; kept as-is since
    # callers may depend on the padded output length.
    pcm_size = frame_size * channels * ctypes.sizeof(ctypes.c_int16)
    pcm = (ctypes.c_int16 * pcm_size)()
    pcm_pointer = ctypes.cast(pcm, c_int16_pointer)

    # Converting from a boolean to int
    decode_fec = int(bool(decode_fec))

    result = _decode(decoder, data, length, pcm_pointer, frame_size,
                     decode_fec)
    if result < 0:
        raise OpusError(result)

    samples = array.array('h', pcm)
    # Fixed: array.tostring() was removed in Python 3.9; use tobytes()
    # where available and keep tostring() as a Python 2 fallback.
    if hasattr(samples, 'tobytes'):
        return samples.tobytes()
    return samples.tostring()
def label_list_parser(self, url):
    """
    Extracts comma separate tag=value pairs from a string

    Assumes all characters other than / and , are valid

    :param url: string containing tag=value pairs separated by / or ,
    :return: set of 'tag=value' strings
    """
    labels = re.findall('([^/,]+=[^/,]+)', url)
    # The pattern requires at least one character on each side of '=',
    # so findall can never yield '' — the original's removal of '' from
    # the set was dead code and has been dropped.
    return set(labels)
def init(app, register_blueprint=True, url_prefix='/fm',
         access_control_function=None, custom_config_json_path=None,
         custom_init_js_path=None):
    """
    :param app: The Flask app
    :param register_blueprint: Override to False to stop the blueprint
        from automatically being registered to the app
    :param url_prefix: The URL prefix for the blueprint, defaults to /fm
    :param access_control_function: Pass in a function here to implement
        access control.  The function will be called any time someone
        tries to access the filemanager, and a 404 will be returned if
        this function returns False
    :param custom_config_json_path: Set this to the full path of you
        filemanager.config.json file if you want to use a custom config.
        Example: os.path.join(app.root_path, 'static/filemanager.config.json')
    :param custom_init_js_path: Set this to the full path of you
        filemanager.init.js file if you want to use a custom init.js.
        Example: os.path.join(app.root_path, 'static/filemanager.init.js')
    """
    # NOTE(review): _URL_PREFIX is declared global but never assigned
    # in this function — confirm whether that is intentional.
    global _initialised, _FILE_PATH, _URL_PREFIX

    # Guard against double registration (module-level state)
    if _initialised:
        raise Exception('Flask Filemanager can only be registered once!')
    _initialised = True

    _FILE_PATH = app.config.get('FLASKFILEMANAGER_FILE_PATH')
    if not _FILE_PATH:
        raise Exception('No FLASKFILEMANAGER_FILE_PATH value in config')
    log.info('File Manager Using file path: {}'.format(_FILE_PATH))
    util.ensure_dir(_FILE_PATH)

    if access_control_function:
        set_access_control_function(access_control_function)

    if custom_config_json_path:
        set_custom_config_json_path(custom_config_json_path)
        log.info('File Manager using custom config.json path: {}'.format(custom_config_json_path))

    if custom_init_js_path:
        set_custom_init_js_path(custom_init_js_path)
        log.info('File Manager using custom init.js path: {}'.format(custom_init_js_path))

    if register_blueprint:
        log.info('Registering filemanager blueprint to {}'.format(url_prefix))
        app.register_blueprint(filemanager_blueprint, url_prefix=url_prefix)
def get_file(path=None, content=None):
    """
    :param path: relative path, or None to get from request
    :param content: file content, output in data.  Used for editfile
    :return: a dict describing the file/folder in the filemanager
        connector response format, or an error response dict
    """
    if path is None:
        path = request.args.get('path')
    if path is None:
        return error('No path in request')

    filename = os.path.split(path.rstrip('/'))[-1]
    # NOTE(review): for names without a '.', rsplit yields the whole
    # name as the "extension" — confirm whether callers rely on this.
    extension = filename.rsplit('.', 1)[-1]

    os_file_path = web_path_to_os_path(path)

    if os.path.isdir(os_file_path):
        file_type = 'folder'
        # Ensure trailing slash
        if path[-1] != '/':
            path += '/'
    else:
        file_type = 'file'

    ctime = int(os.path.getctime(os_file_path))
    mtime = int(os.path.getmtime(os_file_path))

    height = 0
    width = 0
    if extension in ['gif', 'jpg', 'jpeg', 'png']:
        try:
            im = PIL.Image.open(os_file_path)
            # Bug fix: PIL's Image.size is (width, height); the original
            # unpacked it as (height, width), swapping the two values.
            width, height = im.size
        except OSError:
            log.exception('Error loading image "{}" to get width and height'.format(os_file_path))

    attributes = {
        'name': filename,
        'path': get_url_path(path),
        'readable': 1 if os.access(os_file_path, os.R_OK) else 0,
        'writeable': 1 if os.access(os_file_path, os.W_OK) else 0,
        'created': datetime.datetime.fromtimestamp(ctime).ctime(),
        'modified': datetime.datetime.fromtimestamp(mtime).ctime(),
        'timestamp': mtime,
        'width': width,
        'height': height,
        'size': os.path.getsize(os_file_path)
    }

    if content:
        attributes['content'] = content

    return {
        'id': path,
        'type': file_type,
        'attributes': attributes
    }
def __get_charset(self):
    '''Return the character encoding (charset) used internally by
    MeCab.

    Charset is that of the system dictionary used by MeCab. Will defer
    to the user-specified MECAB_CHARSET environment variable, if set.

    Defaults to shift-jis on Windows.
    Defaults to utf-8 on Mac OS.
    Defaults to euc-jp, as per MeCab documentation, when all else fails.

    Returns:
        Character encoding (charset) used by MeCab.
    '''
    # User override wins over any detection
    cset = os.getenv(self.MECAB_CHARSET)
    if cset:
        logger.debug(self._DEBUG_CSET_DEFAULT.format(cset))
        return cset
    else:
        try:
            # Ask the mecab binary for its dictionary info; the charset
            # line identifies the system dictionary's encoding
            res = Popen(['mecab', '-D'], stdout=PIPE).communicate()
            lines = res[0].decode()
            if not lines.startswith('unrecognized'):
                dicinfo = lines.split(os.linesep)
                t = [t for t in dicinfo if t.startswith('charset')]
                if len(t) > 0:
                    cset = t[0].split()[1].lower()
                    logger.debug(self._DEBUG_CSET_DEFAULT.format(cset))
                    return cset
                else:
                    logger.error('{}\n'.format(self._ERROR_NODIC))
                    raise EnvironmentError(self._ERROR_NODIC)
            else:
                logger.error('{}\n'.format(self._ERROR_NOCMD))
                raise EnvironmentError(self._ERROR_NOCMD)
        except OSError:
            # mecab executable not found: fall back to per-platform
            # defaults
            cset = 'euc-jp'
            if sys.platform == 'win32':
                cset = 'shift-jis'
            elif sys.platform == 'darwin':
                cset = 'utf8'
            logger.debug(self._DEBUG_CSET_DEFAULT.format(cset))
            return cset
def __get_libpath(self):
    '''Return the absolute path to the MeCab library.

    On Windows, the path to the system dictionary is used to deduce the
    path to libmecab.dll.

    Otherwise, mecab-config is used find the libmecab shared object or
    dynamic library (*NIX or Mac OS, respectively).

    Will defer to the user-specified MECAB_PATH environment variable,
    if set.

    Returns:
        The absolute path to the MeCab library.

    Raises:
        EnvironmentError: A problem was encountered in trying to locate
            the MeCab library.
    '''
    # User override wins over any detection
    libp = os.getenv(self.MECAB_PATH)
    if libp:
        return os.path.abspath(libp)
    else:
        plat = sys.platform
        if plat == 'win32':
            lib = self._LIBMECAB.format(self._WINLIB_EXT)

            try:
                # Registry stores the mecabrc path; the DLL lives in
                # <install>/bin next to it
                v = self.__regkey_value(self._WINHKEY, self._WINVALUE)
                ldir = v.split('etc')[0]
                libp = os.path.join(ldir, 'bin', lib)
            except EnvironmentError as err:
                logger.error('{}\n'.format(err))
                logger.error('{}\n'.format(sys.exc_info()[0]))
                raise EnvironmentError(
                    self._ERROR_WINREG.format(self._WINVALUE,
                                              self._WINHKEY))
        else:
            # UNIX-y OS?
            if plat == 'darwin':
                lib = self._LIBMECAB.format(self._MACLIB_EXT)
            else:
                lib = self._LIBMECAB.format(self._UNIXLIB_EXT)

            try:
                # mecab-config reports the library directory
                cmd = ['mecab-config', '--libs-only-L']
                res = Popen(cmd, stdout=PIPE).communicate()
                lines = res[0].decode()
                if not lines.startswith('unrecognized'):
                    linfo = lines.strip()
                    libp = os.path.join(linfo, lib)
                else:
                    raise EnvironmentError(
                        self._ERROR_MECABCONFIG.format(lib))
            except EnvironmentError as err:
                logger.error('{}\n'.format(err))
                logger.error('{}\n'.format(sys.exc_info()[0]))
                raise EnvironmentError(self._ERROR_NOLIB.format(lib))

        if libp and os.path.exists(libp):
            libp = os.path.abspath(libp)
            # Cache the discovered path for subsequent lookups
            os.environ[self.MECAB_PATH] = libp
            return libp
        else:
            raise EnvironmentError(self._ERROR_NOLIB.format(libp))
r'''Return the data of value mecabrc at MeCab HKEY node. On Windows, the path to the mecabrc as set in the Windows Registry is used to deduce the path to libmecab.dll. Returns: The full path to the mecabrc on Windows. Raises: WindowsError: A problem was encountered in trying to locate the value mecabrc at HKEY_CURRENT_USER\Software\MeCab. def __regkey_value(self, path, name='', start_key=None): r'''Return the data of value mecabrc at MeCab HKEY node. On Windows, the path to the mecabrc as set in the Windows Registry is used to deduce the path to libmecab.dll. Returns: The full path to the mecabrc on Windows. Raises: WindowsError: A problem was encountered in trying to locate the value mecabrc at HKEY_CURRENT_USER\Software\MeCab. ''' if sys.version < '3': import _winreg as reg else: import winreg as reg def _fn(path, name='', start_key=None): if isinstance(path, str): path = path.split('\\') if start_key is None: start_key = getattr(reg, path[0]) return _fn(path[1:], name, start_key) else: subkey = path.pop(0) with reg.OpenKey(start_key, subkey) as handle: if path: return _fn(path, name, handle) else: desc, i = None, 0 while not desc or desc[0] != name: desc = reg.EnumValue(handle, i) i += 1 return desc[1] return _fn(path, name, start_key)
def diff(old_html, new_html, cutoff=0.0, plaintext=False, pretty=False):
    """Show the differences between the old and new html document, as html.

    Return the document html with extra tags added to show changes. Newly
    added sections are wrapped in <ins> tags, deleted sections in <del> tags.
    """
    # Pick the parser once; both documents must go through the same one.
    parse = parse_text if plaintext else parse_minidom
    old_dom = parse(old_html)
    new_dom = parse(new_html)

    # Bail out early when the documents are too dissimilar to diff usefully.
    if not check_text_similarity(old_dom, new_dom, cutoff):
        return '<h2>The differences from the previous version are too large to show concisely.</h2>'

    dom = dom_diff(old_dom, new_dom)

    # HTML-specific cleanup (not applicable to plain text input).
    if not plaintext:
        fix_lists(dom)
        fix_tables(dom)

    # Only return html for the document body contents.
    body_elements = dom.getElementsByTagName('body')
    if len(body_elements) == 1:
        dom = body_elements[0]
    return minidom_tostring(dom, pretty=pretty)
def adjusted_ops(opcodes):
    """
    Iterate through opcodes, turning them into a series of insert and delete
    operations, adjusting indices to account for the size of insertions and
    deletions.

    >>> def sequence_opcodes(old, new): return difflib.SequenceMatcher(a=old, b=new).get_opcodes()
    >>> list(adjusted_ops(sequence_opcodes('abc', 'b')))
    [('delete', 0, 1, 0, 0), ('delete', 1, 2, 1, 1)]
    >>> list(adjusted_ops(sequence_opcodes('b', 'abc')))
    [('insert', 0, 0, 0, 1), ('insert', 2, 2, 2, 3)]
    >>> list(adjusted_ops(sequence_opcodes('axxa', 'aya')))
    [('delete', 1, 3, 1, 1), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('axa', 'aya')))
    [('delete', 1, 2, 1, 1), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('ab', 'bc')))
    [('delete', 0, 1, 0, 0), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('bc', 'ab')))
    [('insert', 0, 0, 0, 1), ('delete', 2, 3, 2, 2)]
    """
    while opcodes:
        tag, i1, i2, j1, j2 = opcodes.pop(0)
        if tag == 'equal':
            continue
        if tag == 'replace':
            # Decompose a replace into a delete followed by an insert and
            # push both back for normal processing. The index variables are
            # deliberate: the insert happens where the delete ended.
            opcodes = [
                ('delete', i1, i2, j1, j1),
                ('insert', i2, i2, j1, j2),
            ] + opcodes
            continue
        # Only delete/insert ops reach this point.
        yield (tag, i1, i2, j1, j2)
        # A delete shrinks the old-side indices; an insert grows them.
        delta = (j2 - j1) if tag == 'insert' else (i1 - i2)
        opcodes = [
            (t, a1 + delta, a2 + delta, b1, b2)
            for t, a1, a2, b1, b2 in opcodes
        ]
def match_indices(match):
    """Yield index tuples (old_index, new_index) for each place in the match."""
    a_start, b_start, size = match
    for offset in range(size):
        yield a_start + offset, b_start + offset
def get_opcodes(matching_blocks):
    """Use difflib to get the opcodes for a set of matching blocks."""
    matcher = difflib.SequenceMatcher(a=[], b=[])
    # Inject the precomputed blocks so get_opcodes derives its ops from
    # them instead of running a fresh matching pass.
    matcher.matching_blocks = matching_blocks
    return matcher.get_opcodes()
def match_blocks(hash_func, old_children, new_children):
    """Use difflib to find matching blocks."""
    # Hash each child so the matcher compares cheap surrogates, not nodes.
    old_hashes = [hash_func(child) for child in old_children]
    new_hashes = [hash_func(child) for child in new_children]
    return difflib.SequenceMatcher(_is_junk, a=old_hashes, b=new_hashes)
def get_nonmatching_blocks(matching_blocks):
    """Given a list of matching blocks, output the gaps between them.

    Non-matches have the format (alo, ahi, blo, bhi). This specifies two
    index ranges, one in the A sequence, and one in the B sequence.
    """
    a_end = b_end = 0
    for a_start, b_start, size in matching_blocks:
        # The gap runs from the end of the previous match to the start of
        # this one (possibly empty).
        yield (a_end, a_start, b_end, b_start)
        a_end = a_start + size
        b_end = b_start + size
def merge_blocks(a_blocks, b_blocks):
    """Given two lists of blocks, combine them, in the proper order.

    Ensure that there are no overlaps, and that they are for sequences of
    the same length.
    """
    # Both lists must end with the same zero-length sentinel, which also
    # pins the two sequences to the same lengths.
    assert a_blocks[-1][2] == b_blocks[-1][2] == 0  # sentinel size is 0
    assert a_blocks[-1] == b_blocks[-1]
    combined = sorted(set(a_blocks + b_blocks))
    # Verify that no block starts before the previous one ended.
    a_pos = b_pos = 0
    for a_start, b_start, size in combined:
        assert a_pos <= a_start
        assert b_pos <= b_start
        a_pos = a_start + size
        b_pos = b_start + size
    return combined
def remove_comments(xml):
    """
    Remove comments, as they can break the xml parser.

    See html5lib issue #122 (
    http://code.google.com/p/html5lib/issues/detail?id=122 ).

    >>> remove_comments('<!-- -->')
    ''
    >>> remove_comments('<!--\\n-->')
    ''
    >>> remove_comments('<p>stuff<!-- \\n -->stuff</p>')
    '<p>stuffstuff</p>'
    """
    # DOTALL lets the comment body span newlines; the non-greedy body stops
    # at the first closing marker.
    return re.sub(r'<!--.*?-->', '', xml, flags=re.DOTALL)
r"""Remove newlines in the xml. If the newline separates words in text, then replace with a space instead. >>> remove_newlines('<p>para one</p>\n<p>para two</p>') '<p>para one</p><p>para two</p>' >>> remove_newlines('<p>line one\nline two</p>') '<p>line one line two</p>' >>> remove_newlines('one\n1') 'one 1' >>> remove_newlines('hey!\nmore text!') 'hey! more text!' def remove_newlines(xml): r"""Remove newlines in the xml. If the newline separates words in text, then replace with a space instead. >>> remove_newlines('<p>para one</p>\n<p>para two</p>') '<p>para one</p><p>para two</p>' >>> remove_newlines('<p>line one\nline two</p>') '<p>line one line two</p>' >>> remove_newlines('one\n1') 'one 1' >>> remove_newlines('hey!\nmore text!') 'hey! more text!' """ # Normalize newlines. xml = xml.replace('\r\n', '\n') xml = xml.replace('\r', '\n') # Remove newlines that don't separate text. The remaining ones do separate text. xml = re.sub(r'(?<=[>\s])\n(?=[<\s])', '', xml) xml = xml.replace('\n', ' ') return xml.strip()
def remove_insignificant_text_nodes(dom):
    """
    For html elements that should not have text nodes inside them, remove all
    whitespace. For elements that may have text, collapse multiple spaces to a
    single space.
    """
    # Collect first, delete after: removing during the walk would mutate
    # the tree under the iterator.
    doomed = []
    for node in walk_dom(dom):
        if not is_text(node):
            continue
        if node.parentNode.tagName in _non_text_node_tags:
            doomed.append(node)
        else:
            node.nodeValue = re.sub(r'\s+', ' ', node.nodeValue)
    for node in doomed:
        remove_node(node)
def get_child(parent, child_index):
    """
    Get the child at the given index, or return None if it doesn't exist.
    """
    if 0 <= child_index < len(parent.childNodes):
        return parent.childNodes[child_index]
    return None
def get_location(dom, location):
    """
    Get the node at the specified location in the dom.

    Location is a sequence of child indices, starting at the children of the
    root element. If there is no node at this location, raise a ValueError.
    """
    node = dom.documentElement
    for child_index in location:
        node = get_child(node, child_index)
        if not node:
            raise ValueError('Node at location %s does not exist.' % location) #TODO: line not covered
    return node
def check_text_similarity(a_dom, b_dom, cutoff):
    """Check whether two dom trees have similar text or not."""
    matcher = WordMatcher(
        a=list(tree_words(a_dom)),
        b=list(tree_words(b_dom)),
    )
    # Similar enough iff the weighted text ratio reaches the cutoff.
    return matcher.text_ratio() >= cutoff
def tree_words(node):
    """Return all the significant text below the given node as a list of words.

    >>> list(tree_words(parse_minidom('<h1>one</h1> two <div>three<em>four</em></div>')))
    ['one', 'two', 'three', 'four']
    """
    for raw_word in split_text(tree_text(node)):
        stripped = raw_word.strip()
        # Skip pure-whitespace fragments.
        if stripped:
            yield stripped
def tree_text(node):
    """Join all text content below the given node with single spaces.

    >>> tree_text(parse_minidom('<h1>one</h1>two<div>three<em>four</em></div>'))
    'one two three four'
    """
    pieces = [
        descendant.nodeValue
        for descendant in walk_dom(node)
        if is_text(descendant)
    ]
    return ' '.join(pieces)
def insert_or_append(parent, node, next_sibling):
    """
    Insert the node before next_sibling. If next_sibling is None, append the
    node last instead.
    """
    if not next_sibling:
        parent.appendChild(node)
    else:
        parent.insertBefore(node, next_sibling)
def wrap(node, tag):
    """Wrap the given tag around a node."""
    wrapper = node.ownerDocument.createElement(tag)
    parent = node.parentNode
    # Put the wrapper where the node was, when the node is attached.
    if parent:
        parent.replaceChild(wrapper, node)
    wrapper.appendChild(node)
    return wrapper
def wrap_inner(node, tag):
    """Wrap the given tag around the contents of a node."""
    wrapper = node.ownerDocument.createElement(tag)
    # Snapshot the children first: appendChild moves each one out of
    # node.childNodes as it goes.
    for child in list(node.childNodes):
        wrapper.appendChild(child)
    node.appendChild(wrapper)
def unwrap(node):
    """Remove a node, replacing it with its children."""
    parent = node.parentNode
    # Move each child up in front of the node, preserving order, then drop
    # the now-empty node.
    for child in list(node.childNodes):
        parent.insertBefore(child, node)
    remove_node(node)
def full_split(text, regex):
    """
    Split the text by the regex, keeping all parts.

    The parts should re-join back into the original text.

    >>> list(full_split('word', re.compile('&.*?')))
    ['word']
    """
    remaining = text
    while remaining:
        match = regex.search(remaining)
        if match is None:
            # No more separators; the tail is the last piece.
            yield remaining
            return
        head = remaining[:match.start()]
        matched = remaining[match.start():match.end()]
        if head:
            yield head
        if matched:
            yield matched
        remaining = remaining[match.end():]
def multi_split(text, regexes):
    """
    Split the text by the given regexes, in priority order.

    Make sure that the regex is parenthesized so that matches are returned in
    re.split().

    Splitting on a single regex works like normal split.
    >>> '|'.join(multi_split('one two three', [r'\w+']))
    'one| |two| |three'

    Splitting on digits first separates the digits from their word
    >>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
    'one|234|five| |678'

    Splitting on words first keeps the word with digits intact.
    >>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
    'one234five| |678'
    """
    def make_regex(s):
        # Detect precompiled patterns by their match() attribute instead of
        # isinstance(s, basestring): `basestring` exists only on Python 2
        # and raised NameError here on Python 3 whenever a plain pattern
        # string was passed.
        return s if hasattr(s, 'match') else re.compile(s)
    regexes = [make_regex(r) for r in regexes]

    # Run the list of pieces through the regex split, splitting it into more
    # pieces. Once a piece has been matched, add it to finished_pieces and
    # don't split it again. The pieces should always join back together to form
    # the original text.
    piece_list = [text]
    finished_pieces = set()

    def apply_re(regex, piece_list):
        # Split every not-yet-finished piece by this regex; pieces that
        # fully match the regex become finished.
        for piece in piece_list:
            if piece in finished_pieces:
                yield piece
                continue
            for s in full_split(piece, regex):
                if regex.match(s):
                    finished_pieces.add(s)
                if s:
                    yield s

    for regex in regexes:
        piece_list = list(apply_re(regex, piece_list))
    # Sanity check: splitting must be lossless.
    assert ''.join(piece_list) == text
    return piece_list
Return a measure of the sequences' word similarity (float in [0,1]). Each word has weight equal to its length for this measure >>> m = WordMatcher(a=['abcdef', '12'], b=['abcdef', '34']) # 3/4 of the text is the same >>> '%.3f' % m.ratio() # normal ratio fails '0.500' >>> '%.3f' % m.text_ratio() # text ratio is accurate '0.750' def text_ratio(self): """Return a measure of the sequences' word similarity (float in [0,1]). Each word has weight equal to its length for this measure >>> m = WordMatcher(a=['abcdef', '12'], b=['abcdef', '34']) # 3/4 of the text is the same >>> '%.3f' % m.ratio() # normal ratio fails '0.500' >>> '%.3f' % m.text_ratio() # text ratio is accurate '0.750' """ return _calculate_ratio( self.match_length(), self._text_length(self.a) + self._text_length(self.b), )
Find the total length of all words that match between the two sequences. def match_length(self): """ Find the total length of all words that match between the two sequences.""" length = 0 for match in self.get_matching_blocks(): a, b, size = match length += self._text_length(self.a[a:a+size]) return length
Run an xml edit script, and return the new html produced. def run_edit_script(self): """ Run an xml edit script, and return the new html produced. """ for action, location, properties in self.edit_script: if action == 'delete': node = get_location(self.dom, location) self.action_delete(node) elif action == 'insert': parent = get_location(self.dom, location[:-1]) child_index = location[-1] self.action_insert(parent, child_index, **properties) return self.dom
def add_changes_markup(dom, ins_nodes, del_nodes):
    """
    Add <ins> and <del> tags to the dom to show changes.
    """
    # The diff algorithm deletes nodes in reverse document order, so walk
    # them reversed to reattach in original order.
    for node in reversed(del_nodes):
        insert_or_append(node.orig_parent, node, node.orig_next_sibling)
        wrap(node, 'del')
    for node in ins_nodes:
        wrap(node, 'ins')
    # Post-processing and cleanup.
    remove_nesting(dom, 'del')
    remove_nesting(dom, 'ins')
    sort_del_before_ins(dom)
    merge_adjacent(dom, 'del')
    merge_adjacent(dom, 'ins')
def remove_nesting(dom, tag_name):
    """
    Unwrap items in the node list that have ancestors with the same tag.
    """
    for element in dom.getElementsByTagName(tag_name):
        for outer in ancestors(element):
            if outer is element:
                continue  # skip the element itself
            if outer is dom.documentElement:
                break  # reached the root without finding an enclosing tag
            if outer.tagName == tag_name:
                unwrap(element)
                break
def sort_nodes(dom, cmp_func):
    """
    Sort the nodes of the dom in-place, based on a comparison function.
    """
    dom.normalize()
    for node in list(walk_dom(dom, elements_only=True)):
        # Bubble the node leftward past every sibling that compares greater.
        sibling = node.previousSibling
        while sibling and cmp_func(sibling, node) == 1:
            node.parentNode.insertBefore(node, sibling)
            sibling = node.previousSibling
def merge_adjacent(dom, tag_name):
    """
    Merge all adjacent tags with the specified tag name in-place.
    """
    for node in dom.getElementsByTagName(tag_name):
        previous = node.previousSibling
        if previous and previous.nodeName == node.tagName:
            # Move the children into the preceding twin, then drop the
            # emptied element.
            for child in list(node.childNodes):
                previous.appendChild(child)
            remove_node(node)
def distribute(node):
    """
    Wrap a copy of the given element around the contents of each of its
    children, removing the node in the process.
    """
    # Record the element children and tag before unwrapping detaches node.
    element_children = [c for c in node.childNodes if is_element(c)]
    tag_name = node.tagName
    unwrap(node)
    for child in element_children:
        wrap_inner(child, tag_name)