| text (string, length 0–1.05M) | meta (dict) |
|---|---|
"""ApacheParser is a member object of the ApacheConfigurator class."""
import fnmatch
import itertools
import logging
import os
import re
import subprocess
from certbot import errors
from certbot_apache import constants
logger = logging.getLogger(__name__)
class ApacheParser(object):
    """Class handles the fine details of parsing the Apache Configuration.

    .. todo:: Make parsing general... remove sites-available etc...

    :ivar str root: Normalized absolute path to the server root
        directory. Without trailing slash.
    :ivar set modules: All module names that are currently enabled.
    :ivar dict loc: Location to place directives, root - configuration origin,
        default - user config file, name - NameVirtualHost,
    """
    # Matches ${VAR} substitutions inside directive arguments; variable
    # names may not contain spaces or closing braces.
    arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
    # Characters that mark an Include argument as an fnmatch-style glob
    # rather than a literal path.
    fnmatch_chars = set(["*", "?", "\\", "[", "]"])
    def __init__(self, aug, root, vhostroot, version=(2, 4)):
        """Initialize the parser and load the Apache configuration.

        :param aug: Augeas instance used for all configuration access
        :param str root: server root directory (normalized to absolute)
        :param str vhostroot: virtual host root directory
        :param tuple version: Apache version as a tuple of ints, e.g. (2, 4)

        :raises errors.PluginError: on pre-2.4 Apache if Define directives
            remain unparsed (no runtime dump is available there)
        """
        # Note: Order is important here.
        # This uses the binary, so it can be done first.
        # https://httpd.apache.org/docs/2.4/mod/core.html#define
        # https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
        # This only handles invocation parameters and Define directives!
        self.parser_paths = {}
        self.variables = {}
        # Apache >= 2.4 can dump its runtime Define variables via the binary.
        if version >= (2, 4):
            self.update_runtime_variables()
        self.aug = aug
        # Find configuration root and make sure augeas can parse it.
        self.root = os.path.abspath(root)
        self.loc = {"root": self._find_config_root()}
        self._parse_file(self.loc["root"])
        self.vhostroot = os.path.abspath(vhostroot)
        # This problem has been fixed in Augeas 1.0
        self.standardize_excl()
        # Temporarily set modules to be empty, so that find_dirs can work
        # https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
        # This needs to come before locations are set.
        self.modules = set()
        self.init_modules()
        # Set up rest of locations
        self.loc.update(self._set_locations())
        # Must also attempt to parse virtual host root
        self._parse_file(self.vhostroot + "/" +
                         constants.os_constant("vhost_files"))
        # check to see if there were unparsed define statements
        if version < (2, 4):
            if self.find_dir("Define", exclude=False):
                raise errors.PluginError("Error parsing runtime variables")
def init_modules(self):
"""Iterates on the configuration until no new modules are loaded.
..todo:: This should be attempted to be done with a binary to avoid
the iteration issue. Else... parse and enable mods at same time.
"""
# Since modules are being initiated... clear existing set.
self.modules = set()
matches = self.find_dir("LoadModule")
iterator = iter(matches)
# Make sure prev_size != cur_size for do: while: iteration
prev_size = -1
while len(self.modules) != prev_size:
prev_size = len(self.modules)
for match_name, match_filename in itertools.izip(
iterator, iterator):
self.modules.add(self.get_arg(match_name))
self.modules.add(
os.path.basename(self.get_arg(match_filename))[:-2] + "c")
def update_runtime_variables(self):
""""
.. note:: Compile time variables (apache2ctl -V) are not used within
the dynamic configuration files. These should not be parsed or
interpreted.
.. todo:: Create separate compile time variables...
simply for arg_get()
"""
stdout = self._get_runtime_cfg()
variables = dict()
matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
try:
matches.remove("DUMP_RUN_CFG")
except ValueError:
return
for match in matches:
if match.count("=") > 1:
logger.error("Unexpected number of equal signs in "
"runtime config dump.")
raise errors.PluginError(
"Error parsing Apache runtime variables")
parts = match.partition("=")
variables[parts[0]] = parts[2]
self.variables = variables
def _get_runtime_cfg(self): # pylint: disable=no-self-use
"""Get runtime configuration info.
:returns: stdout from DUMP_RUN_CFG
"""
try:
proc = subprocess.Popen(
constants.os_constant("define_cmd"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.error(
"Error running command %s for runtime parameters!%s",
constants.os_constant("define_cmd"), os.linesep)
raise errors.MisconfigurationError(
"Error accessing loaded Apache parameters: %s",
constants.os_constant("define_cmd"))
# Small errors that do not impede
if proc.returncode != 0:
logger.warning("Error in checking parameter list: %s", stderr)
raise errors.MisconfigurationError(
"Apache is unable to check whether or not the module is "
"loaded because Apache is misconfigured.")
return stdout
def filter_args_num(self, matches, args): # pylint: disable=no-self-use
"""Filter out directives with specific number of arguments.
This function makes the assumption that all related arguments are given
in order. Thus /files/apache/directive[5]/arg[2] must come immediately
after /files/apache/directive[5]/arg[1]. Runs in 1 linear pass.
:param string matches: Matches of all directives with arg nodes
:param int args: Number of args you would like to filter
:returns: List of directives that contain # of arguments.
(arg is stripped off)
"""
filtered = []
if args == 1:
for i in range(len(matches)):
if matches[i].endswith("/arg"):
filtered.append(matches[i][:-4])
else:
for i in range(len(matches)):
if matches[i].endswith("/arg[%d]" % args):
# Make sure we don't cause an IndexError (end of list)
# Check to make sure arg + 1 doesn't exist
if (i == (len(matches) - 1) or
not matches[i + 1].endswith("/arg[%d]" %
(args + 1))):
filtered.append(matches[i][:-len("/arg[%d]" % args)])
return filtered
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
"""Adds directive and value to IfMod ssl block.
Adds given directive and value along configuration path within
an IfMod mod_ssl.c block. If the IfMod block does not exist in
the file, it is created.
:param str aug_conf_path: Desired Augeas config path to add directive
:param str directive: Directive you would like to add, e.g. Listen
:param args: Values of the directive; str "443" or list of str
:type args: list
"""
# TODO: Add error checking code... does the path given even exist?
# Does it throw exceptions?
if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
# IfModule can have only one valid argument, so append after
self.aug.insert(if_mod_path + "arg", "directive", False)
nvh_path = if_mod_path + "directive[1]"
self.aug.set(nvh_path, directive)
if len(args) == 1:
self.aug.set(nvh_path + "/arg", args[0])
else:
for i, arg in enumerate(args):
self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)
def _get_ifmod(self, aug_conf_path, mod):
"""Returns the path to <IfMod mod> and creates one if it doesn't exist.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
"""
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
if len(if_mods) == 0:
self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
# Strip off "arg" at end of first ifmod path
return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, args):
"""Appends directive to the end fo the file given by aug_conf_path.
.. note:: Not added to AugeasConfigurator because it may depend
on the lens
:param str aug_conf_path: Augeas configuration path to add directive
:param str directive: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(
"%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
else:
self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
    def find_dir(self, directive, arg=None, start=None, exclude=True):
        """Finds directive in the configuration.

        Recursively searches through config files to find directives
        Directives should be in the form of a case insensitive regex currently

        .. todo:: arg should probably be a list
        .. todo:: arg search currently only supports direct matching. It does
            not handle the case of variables or quoted arguments. This should
            be adapted to use a generic search for the directive and then do a
            case-insensitive self.get_arg filter

        Note: Augeas is inherently case sensitive while Apache is case
        insensitive. Augeas 1.0 allows case insensitive regexes like
        regexp(/Listen/, "i"), however the version currently supported
        by Ubuntu 0.10 does not. Thus I have included my own case insensitive
        transformation by calling case_i() on everything to maintain
        compatibility.

        :param str directive: Directive to look for
        :param arg: Specific value directive must have, None if all should
            be considered
        :type arg: str or None
        :param str start: Beginning Augeas path to begin looking
        :param bool exclude: Whether or not to exclude directives based on
            variables and enabled modules

        :returns: list of Augeas paths to the matching arg nodes, ordered
            as encountered (included files expanded in place)
        """
        # Cannot place member variable in the definition of the function so...
        if not start:
            start = get_aug_path(self.loc["root"])
        # No regexp code
        # if arg is None:
        #     matches = self.aug.match(start +
        #         "//*[self::directive='" + directive + "']/arg")
        # else:
        #     matches = self.aug.match(start +
        #         "//*[self::directive='" + directive +
        #         "']/* [self::arg='" + arg + "']")
        # includes = self.aug.match(start +
        #     "//* [self::directive='Include']/* [label()='arg']")
        # Search for the directive and for Include/IncludeOptional in a
        # single pass so that included files can be followed recursively.
        regex = "(%s)|(%s)|(%s)" % (case_i(directive),
                                    case_i("Include"),
                                    case_i("IncludeOptional"))
        matches = self.aug.match(
            "%s//*[self::directive=~regexp('%s')]" % (start, regex))
        if exclude:
            # Drop hits inside unsatisfied <IfModule>/<IfDefine> blocks.
            matches = self._exclude_dirs(matches)
        if arg is None:
            arg_suffix = "/arg"
        else:
            arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)
        ordered_matches = []
        # TODO: Wildcards should be included in alphabetical order
        # https://httpd.apache.org/docs/2.4/mod/core.html#include
        for match in matches:
            dir_ = self.aug.get(match).lower()
            if dir_ == "include" or dir_ == "includeoptional":
                # Recurse into the included file; its results are spliced
                # in at this position to preserve ordering.
                ordered_matches.extend(self.find_dir(
                    directive, arg,
                    self._get_include_path(self.get_arg(match + "/arg")),
                    exclude))
            # This additionally allows Include
            if dir_ == directive.lower():
                ordered_matches.extend(self.aug.match(match + arg_suffix))
        return ordered_matches
def get_arg(self, match):
"""Uses augeas.get to get argument value and interprets result.
This also converts all variables and parameters appropriately.
"""
value = self.aug.get(match)
# No need to strip quotes for variables, as apache2ctl already does
# this, but we do need to strip quotes for all normal arguments.
# Note: normal argument may be a quoted variable
# e.g. strip now, not later
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
def _exclude_dirs(self, matches):
"""Exclude directives that are not loaded into the configuration."""
filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
else:
valid_matches.append(match)
return valid_matches
    def _pass_filter(self, match, filter_):
        """Determine if directive passes a filter.

        Walks every occurrence of the filter directive name (e.g.
        "ifmodule") embedded in the Augeas path and checks that block's
        argument against the set of enabled parameters.

        :param str match: Augeas path
        :param list filter: list of tuples of form
            [("lowercase if directive", set of relevant parameters)]

        :returns: True if every enclosing conditional block of this kind
            is satisfied, False otherwise
        :rtype: bool
        """
        match_l = match.lower()
        last_match_idx = match_l.find(filter_[0])
        while last_match_idx != -1:
            # Check args
            # end_of_if marks the "/" terminating this path component, so
            # match[:end_of_if] is the conditional block's own node.
            end_of_if = match_l.find("/", last_match_idx)
            # This should be aug.get (vars are not used e.g. parser.aug_get)
            expression = self.aug.get(match[:end_of_if] + "/arg")
            # NOTE(review): assumes the conditional node always carries an
            # arg; if aug.get returned None this would raise -- confirm.
            if expression.startswith("!"):
                # Strip off "!"
                # Negated condition: passes only when the parameter is
                # NOT enabled.
                if expression[1:] in filter_[1]:
                    return False
            else:
                if expression not in filter_[1]:
                    return False
            # Continue scanning for further nested occurrences.
            last_match_idx = match_l.find(filter_[0], end_of_if)
        return True
    def _get_include_path(self, arg):
        """Converts an Apache Include directive into Augeas path.

        Converts an Apache Include directive argument into an Augeas
        searchable path

        .. todo:: convert to use os.path.join()

        :param str arg: Argument of Include directive

        :returns: Augeas path string
        :rtype: str
        """
        # Check to make sure only expected characters are used <- maybe remove
        # validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
        # matchObj = validChars.match(arg)
        # if matchObj.group() != arg:
        #     logger.error("Error: Invalid regexp characters in %s", arg)
        #     return []
        # Remove beginning and ending quotes
        arg = arg.strip("'\"")
        # Standardize the include argument based on server root
        if not arg.startswith("/"):
            # Normpath will condense ../
            # Relative includes are resolved against the server root.
            arg = os.path.normpath(os.path.join(self.root, arg))
        else:
            arg = os.path.normpath(arg)
        # Attempts to add a transform to the file if one does not already exist
        if os.path.isdir(arg):
            # Including a directory pulls in every file inside it.
            self._parse_file(os.path.join(arg, "*"))
        else:
            self._parse_file(arg)
        # Argument represents an fnmatch regular expression, convert it
        # Split up the path and convert each into an Augeas accepted regex
        # then reassemble
        split_arg = arg.split("/")
        for idx, split in enumerate(split_arg):
            if any(char in ApacheParser.fnmatch_chars for char in split):
                # Turn it into a augeas regex
                # TODO: Can this instead be an augeas glob instead of regex
                split_arg[idx] = ("* [label()=~regexp('%s')]" %
                                  self.fnmatch_to_re(split))
        # Reassemble the argument
        # Note: This also normalizes the argument /serverroot/ -> /serverroot
        arg = "/".join(split_arg)
        return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match): # pylint: disable=no-self-use
"""Method converts Apache's basic fnmatch to regular expression.
Assumption - Configs are assumed to be well-formed and only writable by
privileged users.
https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html
:param str clean_fn_match: Apache style filename match, like globs
:returns: regex suitable for augeas
:rtype: str
"""
# This strips off final /Z(?ms)
return fnmatch.translate(clean_fn_match)[:-7]
def _parse_file(self, filepath):
"""Parse file with Augeas
Checks to see if file_path is parsed by Augeas
If filepath isn't parsed, the file is added and Augeas is reloaded
:param str filepath: Apache config file path
"""
use_new, remove_old = self._check_path_actions(filepath)
# Test if augeas included file for Httpd.lens
# Note: This works for augeas globs, ie. *.conf
if use_new:
inc_test = self.aug.match(
"/augeas/load/Httpd['%s' =~ glob(incl)]" % filepath)
if not inc_test:
# Load up files
# This doesn't seem to work on TravisCI
# self.aug.add_transform("Httpd.lns", [filepath])
if remove_old:
self._remove_httpd_transform(filepath)
self._add_httpd_transform(filepath)
self.aug.load()
def _check_path_actions(self, filepath):
"""Determine actions to take with a new augeas path
This helper function will return a tuple that defines
if we should try to append the new filepath to augeas
parser paths, and / or remove the old one with more
narrow matching.
:param str filepath: filepath to check the actions for
"""
try:
new_file_match = os.path.basename(filepath)
existing_matches = self.parser_paths[os.path.dirname(filepath)]
if "*" in existing_matches:
use_new = False
else:
use_new = True
if new_file_match == "*":
remove_old = True
else:
remove_old = False
except KeyError:
use_new = True
remove_old = False
return use_new, remove_old
def _remove_httpd_transform(self, filepath):
"""Remove path from Augeas transform
:param str filepath: filepath to remove
"""
remove_basenames = self.parser_paths[os.path.dirname(filepath)]
remove_dirname = os.path.dirname(filepath)
for name in remove_basenames:
remove_path = remove_dirname + "/" + name
remove_inc = self.aug.match(
"/augeas/load/Httpd/incl [. ='%s']" % remove_path)
self.aug.remove(remove_inc[0])
self.parser_paths.pop(remove_dirname)
def _add_httpd_transform(self, incl):
"""Add a transform to Augeas.
This function will correctly add a transform to augeas
The existing augeas.add_transform in python doesn't seem to work for
Travis CI as it loads in libaugeas.so.0.10.0
:param str incl: filepath to include for transform
"""
last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
if last_include:
# Insert a new node immediately after the last incl
self.aug.insert(last_include[0], "incl", False)
self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
# On first use... must load lens and add file to incl
else:
# Augeas uses base 1 indexing... insert at beginning...
self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
self.aug.set("/augeas/load/Httpd/incl", incl)
# Add included path to paths dictionary
try:
self.parser_paths[os.path.dirname(incl)].append(
os.path.basename(incl))
except KeyError:
self.parser_paths[os.path.dirname(incl)] = [
os.path.basename(incl)]
def standardize_excl(self):
"""Standardize the excl arguments for the Httpd lens in Augeas.
Note: Hack!
Standardize the excl arguments for the Httpd lens in Augeas
Servers sometimes give incorrect defaults
Note: This problem should be fixed in Augeas 1.0. Unfortunately,
Augeas 0.10 appears to be the most popular version currently.
"""
# attempt to protect against augeas error in 0.10.0 - ubuntu
# *.augsave -> /*.augsave upon augeas.load()
# Try to avoid bad httpd files
# There has to be a better way... but after a day and a half of testing
# I had no luck
# This is a hack... work around... submit to augeas if still not fixed
excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
"*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
"*~",
self.root + "/*.augsave",
self.root + "/*~",
self.root + "/*/*augsave",
self.root + "/*/*~",
self.root + "/*/*/*.augsave",
self.root + "/*/*/*~"]
for i, excluded in enumerate(excl, 1):
self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)
self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self.loc["root"]
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf", "conf/httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def case_i(string):
    """Returns case insensitive regex.

    Returns a sloppy, but necessary version of a case insensitive regex.
    Any string should be able to be submitted and the string is
    escaped and then made case insensitive.
    May be replaced by a more proper /i once augeas 1.0 is widely
    supported.

    :param str string: string to make case i regex
    """
    pieces = []
    # Escape first, then wrap each letter in a [Xx] character class;
    # non-letters (including escape backslashes) pass through untouched.
    for char in re.escape(string):
        if char.isalpha():
            pieces.append("[%s%s]" % (char.upper(), char.lower()))
        else:
            pieces.append(char)
    return "".join(pieces)
def get_aug_path(file_path):
    """Return augeas path for full filepath.

    :param str file_path: Full filepath
    """
    # Augeas exposes parsed files under the /files tree.
    return "/files" + file_path
| {
"repo_name": "bsmr-misc-forks/letsencrypt",
"path": "certbot-apache/certbot_apache/parser.py",
"copies": "7",
"size": "24118",
"license": "apache-2.0",
"hash": 5343351331119563000,
"line_mean": 36.450310559,
"line_max": 91,
"alpha_frac": 0.5715648064,
"autogenerated": false,
"ratio": 4.168337366055997,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 644
} |
"""ApacheParser is a member object of the ApacheConfigurator class."""
import fnmatch
import itertools
import logging
import os
import re
import subprocess
from letsencrypt import errors
from letsencrypt_apache import constants
logger = logging.getLogger(__name__)
class ApacheParser(object):
"""Class handles the fine details of parsing the Apache Configuration.
.. todo:: Make parsing general... remove sites-available etc...
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar set modules: All module names that are currently enabled.
:ivar dict loc: Location to place directives, root - configuration origin,
default - user config file, name - NameVirtualHost,
"""
arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
fnmatch_chars = set(["*", "?", "\\", "[", "]"])
def __init__(self, aug, root, vhostroot, version=(2, 4)):
# Note: Order is important here.
# This uses the binary, so it can be done first.
# https://httpd.apache.org/docs/2.4/mod/core.html#define
# https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
# This only handles invocation parameters and Define directives!
self.parser_paths = {}
self.variables = {}
if version >= (2, 4):
self.update_runtime_variables()
self.aug = aug
# Find configuration root and make sure augeas can parse it.
self.root = os.path.abspath(root)
self.loc = {"root": self._find_config_root()}
self._parse_file(self.loc["root"])
self.vhostroot = os.path.abspath(vhostroot)
# This problem has been fixed in Augeas 1.0
self.standardize_excl()
# Temporarily set modules to be empty, so that find_dirs can work
# https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
# This needs to come before locations are set.
self.modules = set()
self.init_modules()
# Set up rest of locations
self.loc.update(self._set_locations())
# Must also attempt to parse virtual host root
self._parse_file(self.vhostroot + "/" +
constants.os_constant("vhost_files"))
# check to see if there were unparsed define statements
if version < (2, 4):
if self.find_dir("Define", exclude=False):
raise errors.PluginError("Error parsing runtime variables")
def init_modules(self):
"""Iterates on the configuration until no new modules are loaded.
..todo:: This should be attempted to be done with a binary to avoid
the iteration issue. Else... parse and enable mods at same time.
"""
# Since modules are being initiated... clear existing set.
self.modules = set()
matches = self.find_dir("LoadModule")
iterator = iter(matches)
# Make sure prev_size != cur_size for do: while: iteration
prev_size = -1
while len(self.modules) != prev_size:
prev_size = len(self.modules)
for match_name, match_filename in itertools.izip(
iterator, iterator):
self.modules.add(self.get_arg(match_name))
self.modules.add(
os.path.basename(self.get_arg(match_filename))[:-2] + "c")
def update_runtime_variables(self):
""""
.. note:: Compile time variables (apache2ctl -V) are not used within
the dynamic configuration files. These should not be parsed or
interpreted.
.. todo:: Create separate compile time variables...
simply for arg_get()
"""
stdout = self._get_runtime_cfg()
variables = dict()
matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
try:
matches.remove("DUMP_RUN_CFG")
except ValueError:
return
for match in matches:
if match.count("=") > 1:
logger.error("Unexpected number of equal signs in "
"runtime config dump.")
raise errors.PluginError(
"Error parsing Apache runtime variables")
parts = match.partition("=")
variables[parts[0]] = parts[2]
self.variables = variables
def _get_runtime_cfg(self): # pylint: disable=no-self-use
"""Get runtime configuration info.
:returns: stdout from DUMP_RUN_CFG
"""
try:
proc = subprocess.Popen(
constants.os_constant("define_cmd"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.error(
"Error running command %s for runtime parameters!%s",
constants.os_constant("define_cmd"), os.linesep)
raise errors.MisconfigurationError(
"Error accessing loaded Apache parameters: %s",
constants.os_constant("define_cmd"))
# Small errors that do not impede
if proc.returncode != 0:
logger.warn("Error in checking parameter list: %s", stderr)
raise errors.MisconfigurationError(
"Apache is unable to check whether or not the module is "
"loaded because Apache is misconfigured.")
return stdout
def filter_args_num(self, matches, args): # pylint: disable=no-self-use
"""Filter out directives with specific number of arguments.
This function makes the assumption that all related arguments are given
in order. Thus /files/apache/directive[5]/arg[2] must come immediately
after /files/apache/directive[5]/arg[1]. Runs in 1 linear pass.
:param string matches: Matches of all directives with arg nodes
:param int args: Number of args you would like to filter
:returns: List of directives that contain # of arguments.
(arg is stripped off)
"""
filtered = []
if args == 1:
for i in range(len(matches)):
if matches[i].endswith("/arg"):
filtered.append(matches[i][:-4])
else:
for i in range(len(matches)):
if matches[i].endswith("/arg[%d]" % args):
# Make sure we don't cause an IndexError (end of list)
# Check to make sure arg + 1 doesn't exist
if (i == (len(matches) - 1) or
not matches[i + 1].endswith("/arg[%d]" %
(args + 1))):
filtered.append(matches[i][:-len("/arg[%d]" % args)])
return filtered
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
"""Adds directive and value to IfMod ssl block.
Adds given directive and value along configuration path within
an IfMod mod_ssl.c block. If the IfMod block does not exist in
the file, it is created.
:param str aug_conf_path: Desired Augeas config path to add directive
:param str directive: Directive you would like to add, e.g. Listen
:param args: Values of the directive; str "443" or list of str
:type args: list
"""
# TODO: Add error checking code... does the path given even exist?
# Does it throw exceptions?
if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
# IfModule can have only one valid argument, so append after
self.aug.insert(if_mod_path + "arg", "directive", False)
nvh_path = if_mod_path + "directive[1]"
self.aug.set(nvh_path, directive)
if len(args) == 1:
self.aug.set(nvh_path + "/arg", args[0])
else:
for i, arg in enumerate(args):
self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)
def _get_ifmod(self, aug_conf_path, mod):
"""Returns the path to <IfMod mod> and creates one if it doesn't exist.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
"""
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
if len(if_mods) == 0:
self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
# Strip off "arg" at end of first ifmod path
return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, args):
"""Appends directive to the end fo the file given by aug_conf_path.
.. note:: Not added to AugeasConfigurator because it may depend
on the lens
:param str aug_conf_path: Augeas configuration path to add directive
:param str directive: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(
"%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
else:
self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
def find_dir(self, directive, arg=None, start=None, exclude=True):
"""Finds directive in the configuration.
Recursively searches through config files to find directives
Directives should be in the form of a case insensitive regex currently
.. todo:: arg should probably be a list
.. todo:: arg search currently only supports direct matching. It does
not handle the case of variables or quoted arguments. This should
be adapted to use a generic search for the directive and then do a
case-insensitive self.get_arg filter
Note: Augeas is inherently case sensitive while Apache is case
insensitive. Augeas 1.0 allows case insensitive regexes like
regexp(/Listen/, "i"), however the version currently supported
by Ubuntu 0.10 does not. Thus I have included my own case insensitive
transformation by calling case_i() on everything to maintain
compatibility.
:param str directive: Directive to look for
:param arg: Specific value directive must have, None if all should
be considered
:type arg: str or None
:param str start: Beginning Augeas path to begin looking
:param bool exclude: Whether or not to exclude directives based on
variables and enabled modules
"""
# Cannot place member variable in the definition of the function so...
if not start:
start = get_aug_path(self.loc["root"])
# No regexp code
# if arg is None:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive + "']/arg")
# else:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive +
# "']/* [self::arg='" + arg + "']")
# includes = self.aug.match(start +
# "//* [self::directive='Include']/* [label()='arg']")
regex = "(%s)|(%s)|(%s)" % (case_i(directive),
case_i("Include"),
case_i("IncludeOptional"))
matches = self.aug.match(
"%s//*[self::directive=~regexp('%s')]" % (start, regex))
if exclude:
matches = self._exclude_dirs(matches)
if arg is None:
arg_suffix = "/arg"
else:
arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)
ordered_matches = []
# TODO: Wildcards should be included in alphabetical order
# https://httpd.apache.org/docs/2.4/mod/core.html#include
for match in matches:
dir_ = self.aug.get(match).lower()
if dir_ == "include" or dir_ == "includeoptional":
ordered_matches.extend(self.find_dir(
directive, arg,
self._get_include_path(self.get_arg(match + "/arg")),
exclude))
# This additionally allows Include
if dir_ == directive.lower():
ordered_matches.extend(self.aug.match(match + arg_suffix))
return ordered_matches
def get_arg(self, match):
"""Uses augeas.get to get argument value and interprets result.
This also converts all variables and parameters appropriately.
"""
value = self.aug.get(match)
# No need to strip quotes for variables, as apache2ctl already does
# this, but we do need to strip quotes for all normal arguments.
# Note: normal argument may be a quoted variable
# e.g. strip now, not later
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
def _exclude_dirs(self, matches):
"""Exclude directives that are not loaded into the configuration."""
filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
else:
valid_matches.append(match)
return valid_matches
def _pass_filter(self, match, filter_):
"""Determine if directive passes a filter.
:param str match: Augeas path
:param list filter: list of tuples of form
[("lowercase if directive", set of relevant parameters)]
"""
match_l = match.lower()
last_match_idx = match_l.find(filter_[0])
while last_match_idx != -1:
# Check args
end_of_if = match_l.find("/", last_match_idx)
# This should be aug.get (vars are not used e.g. parser.aug_get)
expression = self.aug.get(match[:end_of_if] + "/arg")
if expression.startswith("!"):
# Strip off "!"
if expression[1:] in filter_[1]:
return False
else:
if expression not in filter_[1]:
return False
last_match_idx = match_l.find(filter_[0], end_of_if)
return True
def _get_include_path(self, arg):
"""Converts an Apache Include directive into Augeas path.
Converts an Apache Include directive argument into an Augeas
searchable path
.. todo:: convert to use os.path.join()
:param str arg: Argument of Include directive
:returns: Augeas path string
:rtype: str
"""
# Check to make sure only expected characters are used <- maybe remove
# validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
# matchObj = validChars.match(arg)
# if matchObj.group() != arg:
# logger.error("Error: Invalid regexp characters in %s", arg)
# return []
# Remove beginning and ending quotes
arg = arg.strip("'\"")
# Standardize the include argument based on server root
if not arg.startswith("/"):
# Normpath will condense ../
arg = os.path.normpath(os.path.join(self.root, arg))
else:
arg = os.path.normpath(arg)
# Attempts to add a transform to the file if one does not already exist
if os.path.isdir(arg):
self._parse_file(os.path.join(arg, "*"))
else:
self._parse_file(arg)
# Argument represents an fnmatch regular expression, convert it
# Split up the path and convert each into an Augeas accepted regex
# then reassemble
split_arg = arg.split("/")
for idx, split in enumerate(split_arg):
if any(char in ApacheParser.fnmatch_chars for char in split):
# Turn it into a augeas regex
# TODO: Can this instead be an augeas glob instead of regex
split_arg[idx] = ("* [label()=~regexp('%s')]" %
self.fnmatch_to_re(split))
# Reassemble the argument
# Note: This also normalizes the argument /serverroot/ -> /serverroot
arg = "/".join(split_arg)
return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match):  # pylint: disable=no-self-use
    """Method converts Apache's basic fnmatch to regular expression.

    Assumption - Configs are assumed to be well-formed and only writable by
    privileged users.

    https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
    http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html

    :param str clean_fn_match: Apache style filename match, like globs

    :returns: regex suitable for augeas
    :rtype: str
    """
    translated = fnmatch.translate(clean_fn_match)
    # Drop the 7-character end-of-string marker ("\Z(?ms)") that
    # fnmatch.translate appends; Augeas supplies its own anchoring.
    return translated[:len(translated) - 7]
def _parse_file(self, filepath):
    """Parse file with Augeas

    Checks to see if file_path is parsed by Augeas
    If filepath isn't parsed, the file is added and Augeas is reloaded

    :param str filepath: Apache config file path
    """
    use_new, remove_old = self._check_path_actions(filepath)
    # Test if augeas included file for Httpd.lens
    # Note: This works for augeas globs, ie. *.conf
    if use_new:
        inc_test = self.aug.match(
            "/augeas/load/Httpd/incl [. ='%s']" % filepath)
        if not inc_test:
            # Load up files
            # This doesn't seem to work on TravisCI
            # self.aug.add_transform("Httpd.lns", [filepath])
            if remove_old:
                # A new "*" glob supersedes the narrower per-file
                # includes registered for this directory.
                self._remove_httpd_transform(filepath)
            self._add_httpd_transform(filepath)
            self.aug.load()
def _check_path_actions(self, filepath):
"""Determine actions to take with a new augeas path
This helper function will return a tuple that defines
if we should try to append the new filepath to augeas
parser paths, and / or remove the old one with more
narrow matching.
:param str filepath: filepath to check the actions for
"""
try:
new_file_match = os.path.basename(filepath)
existing_matches = self.parser_paths[os.path.dirname(filepath)]
if "*" in existing_matches:
use_new = False
else:
use_new = True
if new_file_match == "*":
remove_old = True
else:
remove_old = False
except KeyError:
use_new = True
remove_old = False
return use_new, remove_old
def _remove_httpd_transform(self, filepath):
    """Remove path from Augeas transform

    Drops every include registered for filepath's directory from the
    Httpd lens and forgets the directory in self.parser_paths.

    :param str filepath: filepath to remove
    """
    remove_basenames = self.parser_paths[os.path.dirname(filepath)]
    remove_dirname = os.path.dirname(filepath)
    for name in remove_basenames:
        remove_path = remove_dirname + "/" + name
        remove_inc = self.aug.match(
            "/augeas/load/Httpd/incl [. ='%s']" % remove_path)
        # Each registered basename is expected to have exactly one
        # matching incl node.
        self.aug.remove(remove_inc[0])
    self.parser_paths.pop(remove_dirname)
def _add_httpd_transform(self, incl):
    """Add a transform to Augeas.

    This function will correctly add a transform to augeas
    The existing augeas.add_transform in python doesn't seem to work for
    Travis CI as it loads in libaugeas.so.0.10.0

    :param str incl: filepath to include for transform
    """
    last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
    if last_include:
        # Insert a new node immediately after the last incl
        self.aug.insert(last_include[0], "incl", False)
        self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
    # On first use... must load lens and add file to incl
    else:
        # Augeas uses base 1 indexing... insert at beginning...
        self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
        self.aug.set("/augeas/load/Httpd/incl", incl)
    # Add included path to paths dictionary (dirname -> list of basenames)
    try:
        self.parser_paths[os.path.dirname(incl)].append(
            os.path.basename(incl))
    except KeyError:
        self.parser_paths[os.path.dirname(incl)] = [
            os.path.basename(incl)]
def standardize_excl(self):
    """Standardize the excl arguments for the Httpd lens in Augeas.

    Note: Hack!
    Standardize the excl arguments for the Httpd lens in Augeas
    Servers sometimes give incorrect defaults
    Note: This problem should be fixed in Augeas 1.0.  Unfortunately,
    Augeas 0.10 appears to be the most popular version currently.
    """
    # attempt to protect against augeas error in 0.10.0 - ubuntu
    # *.augsave -> /*.augsave upon augeas.load()
    # Try to avoid bad httpd files
    # There has to be a better way... but after a day and a half of testing
    # I had no luck
    # This is a hack... work around... submit to augeas if still not fixed

    # Editor backup and package-manager leftover files must never be
    # parsed as live configuration.
    excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
            "*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
            "*~",
            self.root + "/*.augsave",
            self.root + "/*~",
            self.root + "/*/*augsave",
            self.root + "/*/*~",
            self.root + "/*/*/*.augsave",
            self.root + "/*/*/*~"]

    for i, excluded in enumerate(excl, 1):
        self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)

    self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self.loc["root"]
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf", "conf/httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def case_i(string):
    """Returns case insensitive regex.

    Returns a sloppy, but necessary version of a case insensitive regex.
    Any string should be able to be submitted and the string is
    escaped and then made case insensitive.
    May be replaced by a more proper /i once augeas 1.0 is widely
    supported.

    :param str string: string to make case i regex
    """
    pieces = []
    for char in re.escape(string):
        if char.isalpha():
            # Replace each letter with a two-letter character class.
            pieces.append("[%s%s]" % (char.upper(), char.lower()))
        else:
            pieces.append(char)
    return "".join(pieces)
def get_aug_path(file_path):
    """Return augeas path for full filepath.

    :param str file_path: Full filepath
    """
    # Augeas exposes parsed files under its /files tree.
    return "/files" + file_path
| {
"repo_name": "TheBoegl/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/parser.py",
"copies": "1",
"size": "24118",
"license": "apache-2.0",
"hash": -388773089462318900,
"line_mean": 36.450310559,
"line_max": 91,
"alpha_frac": 0.5716062692,
"autogenerated": false,
"ratio": 4.170499740619056,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 644
} |
"""ApacheParser is a member object of the ApacheConfigurator class."""
import fnmatch
import itertools
import logging
import os
import re
import subprocess
from letsencrypt import errors
logger = logging.getLogger(__name__)
class ApacheParser(object):
    """Class handles the fine details of parsing the Apache Configuration.

    .. todo:: Make parsing general... remove sites-available etc...

    :ivar str root: Normalized absolute path to the server root
        directory. Without trailing slash.
    :ivar set modules: All module names that are currently enabled.
    :ivar dict loc: Location to place directives, root - configuration origin,
        default - user config file, name - NameVirtualHost,
    """
    # Matches ${VAR} interpolations inside directive arguments.
    arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
    # Characters with special meaning in Apache fnmatch/glob patterns.
    fnmatch_chars = set(["*", "?", "\\", "[", "]"])
def __init__(self, aug, root, ctl):
    """Initialize the parser and load the Apache configuration.

    :param aug: Augeas object used for all configuration parsing
    :param str root: Apache server root directory
    :param str ctl: path to the Apache control binary (e.g. apache2ctl)
    """
    # Note: Order is important here.

    # This uses the binary, so it can be done first.
    # https://httpd.apache.org/docs/2.4/mod/core.html#define
    # https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
    # This only handles invocation parameters and Define directives!
    self.variables = {}
    self.update_runtime_variables(ctl)

    self.aug = aug
    # Find configuration root and make sure augeas can parse it.
    self.root = os.path.abspath(root)
    self.loc = {"root": self._find_config_root()}
    self._parse_file(self.loc["root"])

    # This problem has been fixed in Augeas 1.0
    self.standardize_excl()

    # Temporarily set modules to be empty, so that find_dirs can work
    # https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
    # This needs to come before locations are set.
    self.modules = set()
    self.init_modules()

    # Set up rest of locations
    self.loc.update(self._set_locations())

    # Must also attempt to parse sites-available or equivalent
    # Sites-available is not included naturally in configuration
    self._parse_file(os.path.join(self.root, "sites-available") + "/*")
def init_modules(self):
    """Iterates on the configuration until no new modules are loaded.

    ..todo:: This should be attempted to be done with a binary to avoid
        the iteration issue.  Else... parse and enable mods at same time.
    """
    # Since modules are being initiated... clear existing set.
    self.modules = set()
    matches = self.find_dir("LoadModule")

    # LoadModule matches alternate: module-name arg, then filename arg;
    # pairing the same iterator with itself consumes them two at a time.
    iterator = iter(matches)
    # Make sure prev_size != cur_size for do: while: iteration
    prev_size = -1

    while len(self.modules) != prev_size:
        prev_size = len(self.modules)

        # NOTE: itertools.izip is Python 2 only; this code predates py3.
        for match_name, match_filename in itertools.izip(
                iterator, iterator):
            self.modules.add(self.get_arg(match_name))
            # Also record the conventional source-file form, e.g.
            # "mod_ssl.so" -> "mod_ssl.c" (strip last two chars, add "c").
            self.modules.add(
                os.path.basename(self.get_arg(match_filename))[:-2] + "c")
def update_runtime_variables(self, ctl):
    """Parse Define variables from the Apache runtime configuration.

    Populates ``self.variables`` from the output of
    ``<ctl> -D DUMP_RUN_CFG``.

    .. note:: Compile time variables (apache2ctl -V) are not used within the
        dynamic configuration files.  These should not be parsed or
        interpreted.

    .. todo:: Create separate compile time variables... simply for arg_get()

    :param str ctl: path to the Apache control binary

    :raises errors.PluginError: if the output cannot be parsed
    """
    stdout = self._get_runtime_cfg(ctl)

    variables = dict()
    matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
    try:
        # DUMP_RUN_CFG always defines itself; its absence means the
        # output was not in the expected format.
        matches.remove("DUMP_RUN_CFG")
    except ValueError:
        raise errors.PluginError("Unable to parse runtime variables")

    for match in matches:
        if match.count("=") > 1:
            logger.error("Unexpected number of equal signs in "
                         "apache2ctl -D DUMP_RUN_CFG")
            raise errors.PluginError(
                "Error parsing Apache runtime variables")
        # "NAME" (no value) yields "", "NAME=value" yields "value".
        parts = match.partition("=")
        variables[parts[0]] = parts[2]

    self.variables = variables
def _get_runtime_cfg(self, ctl):  # pylint: disable=no-self-use
    """Get runtime configuration info.

    Invokes the Apache control binary with ``-D DUMP_RUN_CFG`` and
    returns its standard output for later parsing.

    :param str ctl: path to the Apache control binary

    :returns: stdout from DUMP_RUN_CFG
    :rtype: str

    :raises errors.MisconfigurationError: if the binary cannot be
        executed or exits with a non-zero status
    """
    try:
        proc = subprocess.Popen(
            [ctl, "-D", "DUMP_RUN_CFG"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
    except (OSError, ValueError):
        logger.error(
            "Error accessing %s for runtime parameters!%s", ctl, os.linesep)
        # Bug fix: interpolate ctl into the message explicitly; exception
        # constructors do not apply logging-style "%s" formatting, so the
        # previous two-argument form never produced a formatted message.
        raise errors.MisconfigurationError(
            "Error accessing loaded Apache parameters: %s" % ctl)
    # Small errors that do not impede
    if proc.returncode != 0:
        # logger.warn is deprecated in favor of logger.warning
        logger.warning("Error in checking parameter list: %s", stderr)
        raise errors.MisconfigurationError(
            "Apache is unable to check whether or not the module is "
            "loaded because Apache is misconfigured.")

    return stdout
def filter_args_num(self, matches, args):  # pylint: disable=no-self-use
    """Filter out directives with specific number of arguments.

    This function makes the assumption that all related arguments are given
    in order.  Thus /files/apache/directive[5]/arg[2] must come immediately
    after /files/apache/directive[5]/arg[1].  Runs in 1 linear pass.

    :param string matches: Matches of all directives with arg nodes
    :param int args: Number of args you would like to filter

    :returns: List of directives that contain # of arguments.
        (arg is stripped off)
    """
    filtered = []
    if args == 1:
        # Single-arg directives have a bare "/arg" node (no index).
        suffix = "/arg"
        for match in matches:
            if match.endswith(suffix):
                filtered.append(match[:-len(suffix)])
        return filtered

    suffix = "/arg[%d]" % args
    next_suffix = "/arg[%d]" % (args + 1)
    last_idx = len(matches) - 1
    for idx, match in enumerate(matches):
        if not match.endswith(suffix):
            continue
        # The directive has exactly `args` arguments only if no
        # arg[args+1] node follows (guard against running off the list).
        if idx == last_idx or not matches[idx + 1].endswith(next_suffix):
            filtered.append(match[:-len(suffix)])
    return filtered
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
    """Adds directive and value to IfMod ssl block.

    Adds given directive and value along configuration path within
    an IfMod mod_ssl.c block.  If the IfMod block does not exist in
    the file, it is created.

    :param str aug_conf_path: Desired Augeas config path to add directive
    :param str directive: Directive you would like to add, e.g. Listen
    :param args: Values of the directive; str "443" or list of str
    :type args: list
    """
    # TODO: Add error checking code... does the path given even exist?
    #       Does it throw exceptions?
    if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
    # IfModule can have only one valid argument, so append after
    self.aug.insert(if_mod_path + "arg", "directive", False)
    nvh_path = if_mod_path + "directive[1]"
    self.aug.set(nvh_path, directive)
    if len(args) == 1:
        self.aug.set(nvh_path + "/arg", args[0])
    else:
        # Augeas arg nodes are 1-indexed.
        for i, arg in enumerate(args):
            self.aug.set("%s/arg[%d]" % (nvh_path, i+1), arg)
def _get_ifmod(self, aug_conf_path, mod):
    """Returns the path to <IfMod mod> and creates one if it doesn't exist.

    :param str aug_conf_path: Augeas configuration path
    :param str mod: module ie. mod_ssl.c
    """
    if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
                              (aug_conf_path, mod)))
    if len(if_mods) == 0:
        # Not found: append a new <IfModule mod> block and re-match so
        # the return value is consistent with the found case.
        self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
        self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
        if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
                                  (aug_conf_path, mod)))
    # Strip off "arg" at end of first ifmod path
    return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, args):
    """Appends directive to the end of the file given by aug_conf_path.

    .. note:: Not added to AugeasConfigurator because it may depend
        on the lens

    :param str aug_conf_path: Augeas configuration path to add directive
    :param str directive: Directive to add
    :param args: Value of the directive.  ie. Listen 443, 443 is arg
    :type args: list or str
    """
    self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)

    if isinstance(args, list):
        # Augeas arg nodes are 1-indexed.
        for i, value in enumerate(args, 1):
            self.aug.set(
                "%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
    else:
        self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
def find_dir(self, directive, arg=None, start=None, exclude=True):
    """Finds directive in the configuration.

    Recursively searches through config files to find directives
    Directives should be in the form of a case insensitive regex currently

    .. todo:: arg should probably be a list

    Note: Augeas is inherently case sensitive while Apache is case
    insensitive.  Augeas 1.0 allows case insensitive regexes like
    regexp(/Listen/, "i"), however the version currently supported
    by Ubuntu 0.10 does not.  Thus I have included my own case insensitive
    transformation by calling case_i() on everything to maintain
    compatibility.

    :param str directive: Directive to look for
    :param arg: Specific value directive must have, None if all should
        be considered
    :type arg: str or None
    :param str start: Beginning Augeas path to begin looking
    :param bool exclude: Whether or not to exclude directives based on
        variables and enabled modules

    :returns: list of Augeas paths to matching arg nodes, in config order
    """
    # Cannot place member variable in the definition of the function so...
    if not start:
        start = get_aug_path(self.loc["root"])

    # Match the directive itself plus Include/IncludeOptional so that
    # included files can be followed recursively below.
    regex = "(%s)|(%s)|(%s)" % (case_i(directive),
                                case_i("Include"),
                                case_i("IncludeOptional"))
    matches = self.aug.match(
        "%s//*[self::directive=~regexp('%s')]" % (start, regex))

    if exclude:
        # Drop matches inside unsatisfied <IfModule>/<IfDefine> blocks.
        matches = self._exclude_dirs(matches)

    if arg is None:
        arg_suffix = "/arg"
    else:
        arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)

    ordered_matches = []

    # TODO: Wildcards should be included in alphabetical order
    # https://httpd.apache.org/docs/2.4/mod/core.html#include
    for match in matches:
        dir_ = self.aug.get(match).lower()
        if dir_ == "include" or dir_ == "includeoptional":
            # Recurse into the included file(s).
            # start[6:] to strip off /files
            ordered_matches.extend(self.find_dir(
                directive, arg,
                self._get_include_path(self.get_arg(match + "/arg")),
                exclude))
        # This additionally allows Include
        if dir_ == directive.lower():
            ordered_matches.extend(self.aug.match(match + arg_suffix))

    return ordered_matches
def get_arg(self, match):
    """Uses augeas.get to get argument value and interprets result.

    This also converts all variables and parameters appropriately.

    :param str match: Augeas path to an arg node

    :returns: node value with every ${VAR} replaced from self.variables

    :raises errors.PluginError: if a referenced variable is undefined
    """
    value = self.aug.get(match)
    variables = ApacheParser.arg_var_interpreter.findall(value)

    for var in variables:
        # Strip off ${ and }
        try:
            value = value.replace(var, self.variables[var[2:-1]])
        except KeyError:
            raise errors.PluginError("Error Parsing variable: %s" % var)

    return value
def _exclude_dirs(self, matches):
    """Exclude directives that are not loaded into the configuration.

    Filters out Augeas matches that live inside <IfModule>/<IfDefine>
    blocks whose condition is not satisfied by the currently enabled
    modules / defined variables.
    """
    filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]

    valid_matches = []
    for match in matches:
        for filter_ in filters:
            if not self._pass_filter(match, filter_):
                break
        else:
            # for/else: reached only when no filter rejected the match.
            valid_matches.append(match)
    return valid_matches
def _pass_filter(self, match, filter_):
    """Determine if directive passes a filter.

    Walks every occurrence of the filter's keyword (e.g. "ifmodule") in
    the Augeas path and verifies its argument against the filter's
    parameter set; a "!"-prefixed argument must be absent instead.

    :param str match: Augeas path
    :param list filter: list of tuples of form
        [("lowercase if directive", set of relevant parameters)]
    """
    match_l = match.lower()
    last_match_idx = match_l.find(filter_[0])

    while last_match_idx != -1:
        # Check args
        end_of_if = match_l.find("/", last_match_idx)
        # This should be aug.get (vars are not used e.g. parser.aug_get)
        # NOTE(review): aug.get may return None if the block carries no
        # arg node, which would raise AttributeError below — confirm
        # such paths cannot reach here.
        expression = self.aug.get(match[:end_of_if] + "/arg")

        if expression.startswith("!"):
            # Strip off "!"
            if expression[1:] in filter_[1]:
                return False
        else:
            if expression not in filter_[1]:
                return False

        last_match_idx = match_l.find(filter_[0], end_of_if)

    return True
def _get_include_path(self, arg):
    """Converts an Apache Include directive into Augeas path.

    Converts an Apache Include directive argument into an Augeas
    searchable path.  As a side effect, the referenced file (or glob)
    is registered with the Httpd lens so Augeas can parse it.

    .. todo:: convert to use os.path.join()

    :param str arg: Argument of Include directive

    :returns: Augeas path string
    :rtype: str
    """
    # Check to make sure only expected characters are used <- maybe remove
    # validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
    # matchObj = validChars.match(arg)
    # if matchObj.group() != arg:
    #     logger.error("Error: Invalid regexp characters in %s", arg)
    #     return []

    # Standardize the include argument based on server root
    if not arg.startswith("/"):
        # Normpath will condense ../
        arg = os.path.normpath(os.path.join(self.root, arg))
    # NOTE(review): absolute arguments are not normalized or unquoted
    # here — confirm that callers only pass clean absolute paths.

    # Attempts to add a transform to the file if one does not already exist
    if os.path.isdir(arg):
        # A directory Include is parsed as a glob over its contents.
        self._parse_file(os.path.join(arg, "*"))
    else:
        self._parse_file(arg)

    # Argument represents an fnmatch regular expression, convert it
    # Split up the path and convert each into an Augeas accepted regex
    # then reassemble
    split_arg = arg.split("/")
    for idx, split in enumerate(split_arg):
        if any(char in ApacheParser.fnmatch_chars for char in split):
            # Turn it into a augeas regex
            # TODO: Can this instead be an augeas glob instead of regex
            split_arg[idx] = ("* [label()=~regexp('%s')]" %
                              self.fnmatch_to_re(split))
    # Reassemble the argument
    # Note: This also normalizes the argument /serverroot/ -> /serverroot
    arg = "/".join(split_arg)

    return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match):  # pylint: disable=no-self-use
    """Method converts Apache's basic fnmatch to regular expression.

    Assumption - Configs are assumed to be well-formed and only writable by
    privileged users.

    https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
    http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html

    :param str clean_fn_match: Apache style filename match, similar to globs

    :returns: regex suitable for augeas
    :rtype: str
    """
    translated = fnmatch.translate(clean_fn_match)
    # Drop the 7-character end-of-string marker ("\Z(?ms)") appended by
    # fnmatch.translate; Augeas anchors the expression itself.
    return translated[:len(translated) - 7]
def _parse_file(self, filepath):
    """Parse file with Augeas

    Checks to see if file_path is parsed by Augeas
    If filepath isn't parsed, the file is added and Augeas is reloaded

    :param str filepath: Apache config file path
    """
    # Test if augeas included file for Httpd.lens
    # Note: This works for augeas globs, ie. *.conf
    inc_test = self.aug.match(
        "/augeas/load/Httpd/incl [. ='%s']" % filepath)
    if not inc_test:
        # Load up files
        # This doesn't seem to work on TravisCI
        # self.aug.add_transform("Httpd.lns", [filepath])
        self._add_httpd_transform(filepath)
        self.aug.load()
def _add_httpd_transform(self, incl):
    """Add a transform to Augeas.

    This function will correctly add a transform to augeas
    The existing augeas.add_transform in python doesn't seem to work for
    Travis CI as it loads in libaugeas.so.0.10.0

    :param str incl: filepath to include for transform
    """
    last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
    if last_include:
        # Insert a new node immediately after the last incl
        self.aug.insert(last_include[0], "incl", False)
        self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
    # On first use... must load lens and add file to incl
    else:
        # Augeas uses base 1 indexing... insert at beginning...
        self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
        self.aug.set("/augeas/load/Httpd/incl", incl)
def standardize_excl(self):
    """Standardize the excl arguments for the Httpd lens in Augeas.

    Note: Hack!
    Standardize the excl arguments for the Httpd lens in Augeas
    Servers sometimes give incorrect defaults
    Note: This problem should be fixed in Augeas 1.0.  Unfortunately,
    Augeas 0.10 appears to be the most popular version currently.
    """
    # attempt to protect against augeas error in 0.10.0 - ubuntu
    # *.augsave -> /*.augsave upon augeas.load()
    # Try to avoid bad httpd files
    # There has to be a better way... but after a day and a half of testing
    # I had no luck
    # This is a hack... work around... submit to augeas if still not fixed

    # Editor backup and package-manager leftover files must never be
    # parsed as live configuration.
    excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
            "*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
            "*~",
            self.root + "/*.augsave",
            self.root + "/*~",
            self.root + "/*/*augsave",
            self.root + "/*/*~",
            self.root + "/*/*/*.augsave",
            self.root + "/*/*/*~"]

    for i, excluded in enumerate(excl, 1):
        self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)

    self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self._set_user_config_file()
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def _set_user_config_file(self):
    """Set the appropriate user configuration file

    .. todo:: This will have to be updated for other distros versions

    :returns: path to the file user-level directives should go in
    :rtype: str
    """
    # Basic check to see if httpd.conf exists and
    # in hierarchy via direct include
    # httpd.conf was very common as a user file in Apache 2.2
    if (os.path.isfile(os.path.join(self.root, "httpd.conf")) and
            self.find_dir("Include", "httpd.conf", self.loc["root"])):
        return os.path.join(self.root, "httpd.conf")
    else:
        # Debian-style default when no included httpd.conf is found.
        return os.path.join(self.root, "apache2.conf")
def case_i(string):
    """Returns case insensitive regex.

    Returns a sloppy, but necessary version of a case insensitive regex.
    Any string should be able to be submitted and the string is
    escaped and then made case insensitive.
    May be replaced by a more proper /i once augeas 1.0 is widely
    supported.

    :param str string: string to make case i regex
    """
    def _both_cases(char):
        # Letters become a two-letter character class; everything else
        # (including escape backslashes) passes through unchanged.
        return "[" + char.upper() + char.lower() + "]" if char.isalpha() else char

    return "".join(map(_both_cases, re.escape(string)))
def get_aug_path(file_path):
    """Return the Augeas tree path corresponding to a full filepath.

    :param str file_path: Full filepath
    """
    # Parsed files live under Augeas' /files tree.
    return "".join(("/files", file_path))
| {
"repo_name": "ahojjati/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/parser.py",
"copies": "7",
"size": "21847",
"license": "apache-2.0",
"hash": -2344342698961028000,
"line_mean": 36.6024096386,
"line_max": 91,
"alpha_frac": 0.5760974047,
"autogenerated": false,
"ratio": 4.0919647874133735,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001809777301777481,
"num_lines": 581
} |
"""ApacheParser is a member object of the ApacheConfigurator class."""
import os
import re
from letsencrypt import errors
class ApacheParser(object):
    """Class handles the fine details of parsing the Apache Configuration.

    :ivar str root: Normalized absolute path to the server root
        directory. Without trailing slash.
    """
def __init__(self, aug, root, ssl_options):
    """Initialize parser state and load the Apache configuration.

    :param aug: Augeas object used for all parsing
    :param str root: Apache server root directory
    :param ssl_options: passed through to _set_locations
    """
    # Find configuration root and make sure augeas can parse it.
    self.aug = aug
    self.root = os.path.abspath(root)
    self.loc = self._set_locations(ssl_options)
    self._parse_file(self.loc["root"])

    # Must also attempt to parse sites-available or equivalent
    # Sites-available is not included naturally in configuration
    self._parse_file(os.path.join(self.root, "sites-available") + "/*")

    # This problem has been fixed in Augeas 1.0
    self.standardize_excl()
def add_dir_to_ifmodssl(self, aug_conf_path, directive, val):
    """Adds directive and value to IfMod ssl block.

    Adds given directive and value along configuration path within
    an IfMod mod_ssl.c block.  If the IfMod block does not exist in
    the file, it is created.

    :param str aug_conf_path: Desired Augeas config path to add directive
    :param str directive: Directive you would like to add
    :param str val: Value of directive ie. Listen 443, 443 is the value
    """
    # TODO: Add error checking code... does the path given even exist?
    #       Does it throw exceptions?
    if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
    # IfModule can have only one valid argument, so append after
    self.aug.insert(if_mod_path + "arg", "directive", False)
    nvh_path = if_mod_path + "directive[1]"
    self.aug.set(nvh_path, directive)
    self.aug.set(nvh_path + "/arg", val)
def _get_ifmod(self, aug_conf_path, mod):
    """Returns the path to <IfMod mod> and creates one if it doesn't exist.

    :param str aug_conf_path: Augeas configuration path
    :param str mod: module ie. mod_ssl.c
    """
    if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
                              (aug_conf_path, mod)))
    if len(if_mods) == 0:
        # Not found: append a new <IfModule mod> block and re-match so
        # the return value is consistent with the found case.
        self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
        self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
        if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
                                  (aug_conf_path, mod)))
    # Strip off "arg" at end of first ifmod path
    return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, arg):
    """Appends directive to the end of the file given by aug_conf_path.

    .. note:: Not added to AugeasConfigurator because it may depend
        on the lens

    :param str aug_conf_path: Augeas configuration path to add directive
    :param str directive: Directive to add
    :param str arg: Value of the directive.  ie. Listen 443, 443 is arg
    """
    self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)

    if isinstance(arg, list):
        # Augeas arg nodes are 1-indexed.
        for i, value in enumerate(arg, 1):
            self.aug.set(
                "%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
    else:
        self.aug.set(aug_conf_path + "/directive[last()]/arg", arg)
def find_dir(self, directive, arg=None, start=None):
    """Finds directive in the configuration.

    Recursively searches through config files to find directives
    Directives should be in the form of a case insensitive regex currently

    .. todo:: Add order to directives returned.  Last directive comes last..
    .. todo:: arg should probably be a list

    Note: Augeas is inherently case sensitive while Apache is case
    insensitive.  Augeas 1.0 allows case insensitive regexes like
    regexp(/Listen/, "i"), however the version currently supported
    by Ubuntu 0.10 does not.  Thus I have included my own case insensitive
    transformation by calling case_i() on everything to maintain
    compatibility.

    :param str directive: Directive to look for
    :param arg: Specific value directive must have, None if all should
        be considered
    :type arg: str or None
    :param str start: Beginning Augeas path to begin looking

    :returns: list of Augeas paths to matching arg nodes
    """
    # Cannot place member variable in the definition of the function so...
    if not start:
        start = get_aug_path(self.loc["root"])

    if arg is None:
        matches = self.aug.match(("%s//*[self::directive=~regexp('%s')]/arg"
                                  % (start, directive)))
    else:
        matches = self.aug.match(("%s//*[self::directive=~regexp('%s')]/*"
                                  "[self::arg=~regexp('%s')]" %
                                  (start, directive, arg)))

    # Also collect Include/IncludeOptional arguments so included files
    # can be searched recursively below.
    incl_regex = "(%s)|(%s)" % (case_i('Include'),
                                case_i('IncludeOptional'))

    includes = self.aug.match(("%s//* [self::directive=~regexp('%s')]/* "
                               "[label()='arg']" % (start, incl_regex)))

    for include in includes:
        # start[6:] to strip off /files
        matches.extend(self.find_dir(
            directive, arg, self._get_include_path(
                strip_dir(start[6:]), self.aug.get(include))))

    return matches
def _get_include_path(self, cur_dir, arg):
    """Converts an Apache Include directive into Augeas path.

    Converts an Apache Include directive argument into an Augeas
    searchable path.  As a side effect, the referenced file is
    registered with the Httpd lens so Augeas can parse it.

    .. todo:: convert to use os.path.join()

    :param str cur_dir: current working directory
    :param str arg: Argument of Include directive

    :returns: Augeas path string
    :rtype: str
    """
    # Sanity check argument - maybe
    # Question: what can the attacker do with control over this string
    # Effect parse file... maybe exploit unknown errors in Augeas
    # If the attacker can Include anything though... and this function
    # only operates on Apache real config data... then the attacker has
    # already won.
    # Perhaps it is better to simply check the permissions on all
    # included files?
    # check_config to validate apache config doesn't work because it
    # would create a race condition between the check and this input

    # TODO: Maybe... although I am convinced we have lost if
    #       Apache files can't be trusted.  The augeas include path
    #       should be made to be exact.

    # Check to make sure only expected characters are used <- maybe remove
    # validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
    # matchObj = validChars.match(arg)
    # if matchObj.group() != arg:
    #     logging.error("Error: Invalid regexp characters in %s", arg)
    #     return []

    # Standardize the include argument based on server root
    if not arg.startswith("/"):
        # NOTE(review): assumes cur_dir ends with "/" (strip_dir seems
        # to provide that) — confirm against caller.
        arg = cur_dir + arg
    # conf/ is a special variable for ServerRoot in Apache
    elif arg.startswith("conf/"):
        arg = self.root + arg[4:]
    # TODO: Test if Apache allows ../ or ~/ for Includes

    # Attempts to add a transform to the file if one does not already exist
    self._parse_file(arg)

    # Argument represents an fnmatch regular expression, convert it
    # Split up the path and convert each into an Augeas accepted regex
    # then reassemble
    if "*" in arg or "?" in arg:
        split_arg = arg.split("/")
        for idx, split in enumerate(split_arg):
            # * and ? are the two special fnmatch characters
            if "*" in split or "?" in split:
                # Turn it into a augeas regex
                # TODO: Can this instead be an augeas glob instead of regex
                split_arg[idx] = ("* [label()=~regexp('%s')]" %
                                  self.fnmatch_to_re(split))
        # Reassemble the argument
        arg = "/".join(split_arg)

    # If the include is a directory, just return the directory as a file
    if arg.endswith("/"):
        return get_aug_path(arg[:len(arg)-1])
    return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match):  # pylint: disable=no-self-use
    """Method converts Apache's basic fnmatch to regular expression.

    :param str clean_fn_match: Apache style filename match, similar to globs

    :returns: regex suitable for augeas
    :rtype: str
    """
    # Translation table for the characters that differ between fnmatch
    # and regex syntax; everything else passes through literally.
    # According to apache.org "?" shouldn't appear, but handle it anyway.
    special = {".": r"\.", "*": ".*", "?": "."}
    return "".join(special.get(char, char) for char in clean_fn_match)
def _parse_file(self, filepath):
    """Parse file with Augeas.

    Checks to see if filepath is already parsed by Augeas.
    If it is not, a transform for it is added and Augeas is reloaded.

    :param str filepath: Apache config file path

    """
    # This match also works for augeas globs, e.g. *.conf
    already_included = self.aug.match(
        "/augeas/load/Httpd/incl [. ='%s']" % filepath)
    if already_included:
        return
    # aug.add_transform("Httpd.lns", [filepath]) does not work on
    # Travis CI, so the transform is constructed manually.
    self._add_httpd_transform(filepath)
    self.aug.load()
def _add_httpd_transform(self, incl):
    """Add a transform to Augeas.

    This function manually adds a transform to augeas because the
    existing augeas.add_transform in python does not work on Travis CI
    (it loads libaugeas.so.0.10.0).

    :param str incl: filepath to include for transform

    """
    existing_incls = self.aug.match("/augeas/load/Httpd/incl [last()]")
    if not existing_incls:
        # First use: the lens must be loaded and the initial incl created.
        # Note that augeas uses base-1 indexing.
        self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
        self.aug.set("/augeas/load/Httpd/incl", incl)
    else:
        # Insert a new node immediately after the last incl and fill it in.
        self.aug.insert(existing_incls[0], "incl", False)
        self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
def standardize_excl(self):
    """Standardize the excl arguments for the Httpd lens in Augeas.

    Note: Hack!
    Servers sometimes ship incorrect default excl patterns (e.g. in
    augeas 0.10.0 on Ubuntu, *.augsave becomes /*.augsave on
    augeas.load()). This should be fixed in Augeas 1.0, but 0.10 is
    still the most widespread version, so the excl list is rewritten
    here to a known-good set that avoids bad httpd files.
    """
    global_patterns = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
                       "*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
                       "*~"]
    # Also exclude editor/backup residue up to three levels under the root.
    root_suffixes = ("/*.augsave", "/*~", "/*/*augsave", "/*/*~",
                     "/*/*/*.augsave", "/*/*/*~")
    patterns = global_patterns + [self.root + sfx for sfx in root_suffixes]
    for idx, pattern in enumerate(patterns, 1):
        self.aug.set("/augeas/load/Httpd/excl[%d]" % idx, pattern)
    self.aug.load()
def _set_locations(self, ssl_options):
    """Set default location for directives.

    Locations are given as file_paths

    .. todo:: Make sure that files are included

    """
    root = self._find_config_root()
    default = self._set_user_config_file(root)
    # Debian-style layouts keep Listen/NameVirtualHost in ports.conf;
    # fall back to the user config file otherwise.
    ports_conf = os.path.join(self.root, "ports.conf")
    listen = name = ports_conf if os.path.isfile(ports_conf) else default
    return {"root": root, "default": default, "listen": listen,
            "name": name, "ssl_options": ssl_options}
def _find_config_root(self):
    """Find the Apache Configuration Root file."""
    # apache2.conf is checked first (Debian-style), then httpd.conf.
    for candidate in ("apache2.conf", "httpd.conf"):
        candidate_path = os.path.join(self.root, candidate)
        if os.path.isfile(candidate_path):
            return candidate_path
    raise errors.LetsEncryptNoInstallationError(
        "Could not find configuration root")
def _set_user_config_file(self, root):
    """Set the appropriate user configuration file

    .. todo:: This will have to be updated for other distros versions

    :param str root: pathname which contains the user config

    """
    # httpd.conf was very common as a user file in Apache 2.2. Use it
    # when it exists and is reachable through a direct Include from the
    # configuration root; otherwise default to apache2.conf.
    httpd_conf = os.path.join(self.root, 'httpd.conf')
    if os.path.isfile(httpd_conf) and self.find_dir(
            case_i("Include"), case_i("httpd.conf"), root):
        return httpd_conf
    return os.path.join(self.root, 'apache2.conf')
def case_i(string):
    """Returns case insensitive regex.

    Returns a sloppy, but necessary version of a case insensitive regex.
    Any string should be able to be submitted and the string is
    escaped and then made case insensitive.
    May be replaced by a more proper /i once augeas 1.0 is widely
    supported.

    :param str string: string to make case i regex

    """
    pieces = []
    for char in re.escape(string):
        if char.isalpha():
            pieces.append("[%s%s]" % (char.upper(), char.lower()))
        else:
            pieces.append(char)
    return "".join(pieces)
def get_aug_path(file_path):
    """Return augeas path for full filepath.

    :param str file_path: Full filepath

    """
    # Augeas exposes parsed files under the /files prefix.
    return "/files{0}".format(file_path)
def strip_dir(path):
    """Returns directory of file path.

    .. todo:: Replace this with Python standard function

    :param str path: path is a file path. not an augeas section or
        directive path

    :returns: directory (with trailing slash), or "" when the path has
        no directory component
    :rtype: str

    """
    head, sep, _ = path.rpartition("/")
    # An empty head means either no slash at all, or a slash only at
    # position 0 (e.g. "/x"), both of which yield "" like the original.
    return head + sep if head else ""
| {
"repo_name": "digideskio/lets-encrypt-preview",
"path": "letsencrypt_apache/parser.py",
"copies": "2",
"size": "15605",
"license": "apache-2.0",
"hash": 3979848765111191600,
"line_mean": 36.784503632,
"line_max": 80,
"alpha_frac": 0.5738545338,
"autogenerated": false,
"ratio": 4.046939834024896,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5620794367824896,
"avg_score": null,
"num_lines": null
} |
"""Apache plugin constants."""
import pkg_resources
from letsencrypt import le_util
# Debian/Ubuntu defaults: apache2ctl tooling, sites-available vhost layout,
# a2enmod/a2dismod available, modules and sites handled by the plugin.
CLI_DEFAULTS_DEBIAN = dict(
    server_root="/etc/apache2",
    vhost_root="/etc/apache2/sites-available",
    vhost_files="*",
    version_cmd=['apache2ctl', '-v'],
    define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
    restart_cmd=['apache2ctl', 'graceful'],
    conftest_cmd=['apache2ctl', 'configtest'],
    enmod="a2enmod",
    dismod="a2dismod",
    le_vhost_ext="-le-ssl.conf",
    handle_mods=True,
    handle_sites=True,
    challenge_location="/etc/apache2",
    MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
        "letsencrypt_apache", "options-ssl-apache.conf")
)
# CentOS/RHEL/Fedora defaults: conf.d layout, apachectl tooling, no
# module/site enable helpers available.
CLI_DEFAULTS_CENTOS = dict(
    server_root="/etc/httpd",
    vhost_root="/etc/httpd/conf.d",
    vhost_files="*.conf",
    version_cmd=['apachectl', '-v'],
    define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],
    restart_cmd=['apachectl', 'graceful'],
    conftest_cmd=['apachectl', 'configtest'],
    enmod=None,
    dismod=None,
    le_vhost_ext="-le-ssl.conf",
    handle_mods=False,
    handle_sites=False,
    challenge_location="/etc/httpd/conf.d",
    MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
        "letsencrypt_apache", "centos-options-ssl-apache.conf")
)
# Gentoo defaults: vhosts.d layout; "apache2ctl virtualhosts" dumps defines.
CLI_DEFAULTS_GENTOO = dict(
    server_root="/etc/apache2",
    vhost_root="/etc/apache2/vhosts.d",
    vhost_files="*.conf",
    version_cmd=['/usr/sbin/apache2', '-v'],
    define_cmd=['apache2ctl', 'virtualhosts'],
    restart_cmd=['apache2ctl', 'graceful'],
    conftest_cmd=['apache2ctl', 'configtest'],
    enmod=None,
    dismod=None,
    le_vhost_ext="-le-ssl.conf",
    handle_mods=False,
    handle_sites=False,
    challenge_location="/etc/apache2/vhosts.d",
    MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
        "letsencrypt_apache", "options-ssl-apache.conf")
)
# OS X defaults: vhosts under /etc/apache2/other.
CLI_DEFAULTS_DARWIN = dict(
    server_root="/etc/apache2",
    vhost_root="/etc/apache2/other",
    vhost_files="*.conf",
    version_cmd=['/usr/sbin/httpd', '-v'],
    define_cmd=['/usr/sbin/httpd', '-t', '-D', 'DUMP_RUN_CFG'],
    restart_cmd=['apachectl', 'graceful'],
    conftest_cmd=['apachectl', 'configtest'],
    enmod=None,
    dismod=None,
    le_vhost_ext="-le-ssl.conf",
    handle_mods=False,
    handle_sites=False,
    challenge_location="/etc/apache2/other",
    MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
        "letsencrypt_apache", "options-ssl-apache.conf")
)
# Maps a lowercased OS/distro name (see os_constant below) to its defaults.
CLI_DEFAULTS = {
    "debian": CLI_DEFAULTS_DEBIAN,
    "ubuntu": CLI_DEFAULTS_DEBIAN,
    "centos": CLI_DEFAULTS_CENTOS,
    "centos linux": CLI_DEFAULTS_CENTOS,
    "fedora": CLI_DEFAULTS_CENTOS,
    "red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
    "gentoo base system": CLI_DEFAULTS_GENTOO,
    "darwin": CLI_DEFAULTS_DARWIN,
}
"""CLI defaults."""
MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
"""Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
AUGEAS_LENS_DIR = pkg_resources.resource_filename(
    "letsencrypt_apache", "augeas_lens")
"""Path to the Augeas lens directory"""
REWRITE_HTTPS_ARGS = [
    "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"]
"""Apache version<2.3.9 rewrite rule arguments used for redirections to
https vhost"""
REWRITE_HTTPS_ARGS_WITH_END = [
    "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"]
"""Apache version >= 2.3.9 rewrite rule arguments used for redirections to
https vhost"""
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
             "\"max-age=31536000\""]
"""Apache header arguments for HSTS"""
UIR_ARGS = ["always", "set", "Content-Security-Policy",
            "upgrade-insecure-requests"]
# Maps security header name to the Header directive arguments that set it.
HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
               "Upgrade-Insecure-Requests": UIR_ARGS}
def os_constant(key):
    """Get a constant value for operating system

    :param key: name of cli constant
    :return: value of constant for active os
    """
    os_name = le_util.get_os_info()[0].lower()
    # Unknown distros fall back to the Debian-style defaults.
    defaults = CLI_DEFAULTS.get(os_name, CLI_DEFAULTS["debian"])
    return defaults[key]
| {
"repo_name": "mitnk/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/constants.py",
"copies": "2",
"size": "4123",
"license": "apache-2.0",
"hash": -9007105534221981000,
"line_mean": 32.25,
"line_max": 74,
"alpha_frac": 0.6458889158,
"autogenerated": false,
"ratio": 3.1282245827010624,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9774113498501062,
"avg_score": 0,
"num_lines": 124
} |
"""Apache plugin constants."""
import pkg_resources
# Debian-style Apache defaults used by the plugin.
CLI_DEFAULTS = dict(
    server_root="/etc/apache2",
    ctl="apache2ctl",
    enmod="a2enmod",
    dismod="a2dismod",
    init_script="/etc/init.d/apache2",
    le_vhost_ext="-le-ssl.conf",
)
"""CLI defaults."""
MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
"""Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
MOD_SSL_CONF_SRC = pkg_resources.resource_filename(
    "letsencrypt_apache", "options-ssl-apache.conf")
"""Path to the Apache mod_ssl config file found in the Let's Encrypt
distribution."""
AUGEAS_LENS_DIR = pkg_resources.resource_filename(
    "letsencrypt_apache", "augeas_lens")
"""Path to the Augeas lens directory"""
REWRITE_HTTPS_ARGS = [
    "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"]
"""Apache rewrite rule arguments used for redirections to https vhost"""
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
             "\"max-age=31536000; includeSubDomains\""]
"""Apache header arguments for HSTS"""
UIR_ARGS = ["always", "set", "Content-Security-Policy",
            "upgrade-insecure-requests"]
# Maps security header name to the Header directive arguments that set it.
HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
               "Upgrade-Insecure-Requests": UIR_ARGS}
| {
"repo_name": "sjerdo/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/constants.py",
"copies": "1",
"size": "1221",
"license": "apache-2.0",
"hash": -551372323606391230,
"line_mean": 28.7804878049,
"line_max": 72,
"alpha_frac": 0.6814086814,
"autogenerated": false,
"ratio": 3.1550387596899223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43364474410899223,
"avg_score": null,
"num_lines": null
} |
"""Apache plugin constants."""
import pkg_resources
# Debian-style Apache defaults used by the plugin.
CLI_DEFAULTS = dict(
    server_root="/etc/apache2",
    ctl="apache2ctl",
    enmod="a2enmod",
    dismod="a2dismod",
    le_vhost_ext="-le-ssl.conf",
)
"""CLI defaults."""
MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
"""Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
MOD_SSL_CONF_SRC = pkg_resources.resource_filename(
    "letsencrypt_apache", "options-ssl-apache.conf")
"""Path to the Apache mod_ssl config file found in the Let's Encrypt
distribution."""
AUGEAS_LENS_DIR = pkg_resources.resource_filename(
    "letsencrypt_apache", "augeas_lens")
"""Path to the Augeas lens directory"""
REWRITE_HTTPS_ARGS = [
    "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"]
"""Apache rewrite rule arguments used for redirections to https vhost"""
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
             "\"max-age=31536000; includeSubDomains\""]
"""Apache header arguments for HSTS"""
UIR_ARGS = ["always", "set", "Content-Security-Policy",
            "upgrade-insecure-requests"]
# Maps security header name to the Header directive arguments that set it.
HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
               "Upgrade-Insecure-Requests": UIR_ARGS}
| {
"repo_name": "Sveder/letsencrypt",
"path": "letsencrypt-apache/letsencrypt_apache/constants.py",
"copies": "6",
"size": "1182",
"license": "apache-2.0",
"hash": 233862619065675070,
"line_mean": 28.55,
"line_max": 72,
"alpha_frac": 0.6827411168,
"autogenerated": false,
"ratio": 3.168900804289544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6851641921089543,
"avg_score": null,
"num_lines": null
} |
"""Apache plugin constants."""
import pkg_resources
MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
"""Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
UPDATED_MOD_SSL_CONF_DIGEST = ".updated-options-ssl-apache-conf-digest.txt"
"""Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`."""
# Used to detect whether an installed options-ssl file is an unmodified
# previous version that may safely be replaced.
ALL_SSL_OPTIONS_HASHES = [
    '2086bca02db48daf93468332543c60ac6acdb6f0b58c7bfdf578a5d47092f82a',
    '4844d36c9a0f587172d9fa10f4f1c9518e3bcfa1947379f155e16a70a728c21a',
    '5a922826719981c0a234b1fbcd495f3213e49d2519e845ea0748ba513044b65b',
    '4066b90268c03c9ba0201068eaa39abbc02acf9558bb45a788b630eb85dadf27',
    'f175e2e7c673bd88d0aff8220735f385f916142c44aa83b09f1df88dd4767a88',
    'cfdd7c18d2025836ea3307399f509cfb1ebf2612c87dd600a65da2a8e2f2797b',
    '80720bd171ccdc2e6b917ded340defae66919e4624962396b992b7218a561791',
    'c0c022ea6b8a51ecc8f1003d0a04af6c3f2bc1c3ce506b3c2dfc1f11ef931082',
]
"""SHA256 hashes of the contents of previous versions of all versions of MOD_SSL_CONF_SRC"""
AUGEAS_LENS_DIR = pkg_resources.resource_filename(
    "certbot_apache", "augeas_lens")
"""Path to the Augeas lens directory"""
REWRITE_HTTPS_ARGS = [
    "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,NE,R=permanent]"]
"""Apache version<2.3.9 rewrite rule arguments used for redirections to
https vhost"""
REWRITE_HTTPS_ARGS_WITH_END = [
    "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,NE,R=permanent]"]
"""Apache version >= 2.3.9 rewrite rule arguments used for redirections to
https vhost"""
# Rewrite argument forms written by older Certbot versions (QSA instead of
# NE); kept so existing configs can still be recognized.
OLD_REWRITE_HTTPS_ARGS = [
    ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"],
    ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"]]
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
             "\"max-age=31536000\""]
"""Apache header arguments for HSTS"""
UIR_ARGS = ["always", "set", "Content-Security-Policy",
            "upgrade-insecure-requests"]
# Maps security header name to the Header directive arguments that set it.
HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
               "Upgrade-Insecure-Requests": UIR_ARGS}
AUTOHSTS_STEPS = [60, 300, 900, 3600, 21600, 43200, 86400]
"""AutoHSTS increase steps: 1min, 5min, 15min, 1h, 6h, 12h, 24h"""
AUTOHSTS_PERMANENT = 31536000
"""Value for the last max-age of HSTS"""
AUTOHSTS_FREQ = 172800
"""Minimum time since last increase to perform a new one: 48h"""
MANAGED_COMMENT = "DO NOT REMOVE - Managed by Certbot"
MANAGED_COMMENT_ID = MANAGED_COMMENT+", VirtualHost id: {0}"
"""Managed by Certbot comments and the VirtualHost identification template"""
| {
"repo_name": "letsencrypt/letsencrypt",
"path": "certbot-apache/certbot_apache/constants.py",
"copies": "1",
"size": "2562",
"license": "apache-2.0",
"hash": -6849753915097495000,
"line_mean": 39.6666666667,
"line_max": 96,
"alpha_frac": 0.725214676,
"autogenerated": false,
"ratio": 2.633093525179856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8856636616535208,
"avg_score": 0.00033431692892949965,
"num_lines": 63
} |
'''Apache Sling Shell
#requires poster (http://atlee.ca/software/poster/index.html)
'''
import urllib2
import base64
import json
import subprocess
import os
#import poster
#
#def curl(url, username=None, password=None, method='GET', data=None, file_path=None):
# '''simple HTTP util. similar to curl
# data should be dict.
# if file_path is set, the file is uploaded through multipart encoding.
# when username and password are given, it uses basic auth.'''
#
# if method == 'POST' and data is None:
# #force POST by putting data
# data = {}
#
# print('%s %s %s:%s' % (method, url, username, password))
#
#
# if file_path is not None:
# if data is not None:
# data['*'] = open(file_path, 'rb')
# poster.encode.multipart_encode(
# req = urllib2.Request(url, urllib.urlencode(data) if data is not None else None)
#
# if username is not None and password is not None:
# #use basic auth
# cred = ('%s:%s' % (username, password)).replace('\n', '')
# encoded = base64.encodestring(cred)
# req.add_header('Authorization', 'Basic ' + encoded)
# return urllib2.urlopen(req)
class SlingCurl(object):
    """Shell-like client for an Apache Sling instance driven through the
    external curl binary (Python 2 code: uses dict.iteritems()).

    Keeps a current working directory (cwd) and a cached JSON view of it.
    """
    def __init__(self, curl='curl', host='localhost', port=8080, username='admin', password='admin', cwd='/'):
        self.curl = curl          # path/name of the curl executable
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.cwd = cwd            # current working directory
        self.json = None          # cached JSON of cwd; invalidated on cd/propset
    def _curl_cmd(self, path, props=None, file_path=None):
        '''Generates a curl command line.
        Returns a list that can be used with subprocess.
        @param props is dict. POST params.
        @param file_path is for file uploading (takes precedence over props).'''
        # -f: fail on HTTP errors, -s: silent, -u: basic auth credentials.
        cmd = [self.curl, '-f', '-s', '-u', "%s:%s" % (self.username, self.password)]
        if props is None:
            props = {}
        if file_path is not None:
            # -T uploads the file via HTTP PUT; props are then ignored.
            cmd.append('-T')
            cmd.append(file_path)
        else:
            # Each prop becomes a multipart form field (-F), implying POST.
            for k,v in props.iteritems():
                cmd.append('-F')
                cmd.append("%s=%s" % (k,v))
        cmd.append('http://%s:%d%s' % (self.host, self.port, path))
        return cmd
    def get_json(self, path=None, level=1, is_tidy=False):
        """Fetch the Sling JSON rendering of path (default: cwd).

        level controls the Sling depth selector (path.<level>.json);
        is_tidy adds the .tidy selector for pretty-printed output.
        Updates the cwd cache when path is the cwd.
        """
        if path is None:
            path = self.cwd
        tidy_selector = '.tidy' if is_tidy else ''
        cmd = self._curl_cmd('%s%s.%d.json' % (path, tidy_selector, level))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        out,err = p.communicate()
        d = json.loads(out)
        if path == self.cwd:
            self.json = d #update cache
        return d
    def pwd(self):
        """Return the current working directory."""
        return self.cwd
    def ls(self, path=None):
        """List child names under path (relative to cwd); dict-valued
        children (i.e. child nodes) get a trailing '/'."""
        if path is None:
            path = self.cwd
        path = os.path.join(self.cwd, path)
        l = []
        d = self.get_json(path)
        for k,v in d.iteritems():
            if isinstance(v, dict):
                l.append(k + '/')
            else:
                l.append(k)
        return l
    def cd(self, path=None):
        """Change the working directory (absolute or relative path) and
        invalidate the JSON cache. No-op when path is None."""
        if path is None:
            return
        # abspath normalizes '..' segments and absolute overrides.
        self.cwd = os.path.abspath(os.path.join(self.cwd, path))
        self.json = None
    def propget(self, prop_name):
        """Return the named property of the cwd node, fetching (and
        caching) the JSON on first use."""
        if self.json is None:
            self.json = self.get_json()
        return self.json.get(prop_name)
    def propset(self, prop_name, value, path=None):
        """POST a single property to path (default: cwd); returns curl's
        exit code and invalidates the cwd cache on success or failure."""
        if path is None:
            path = self.cwd
        props = {}
        props[prop_name] = value
        cmd = self._curl_cmd(path, props)
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        out,err = p.communicate()
        if path == self.cwd:
            self.json = None
        return p.returncode
| {
"repo_name": "saml/rosling",
"path": "scripts/sling/__init__.py",
"copies": "1",
"size": "3895",
"license": "apache-2.0",
"hash": -6538723688089253000,
"line_mean": 28.5075757576,
"line_max": 110,
"alpha_frac": 0.5530166881,
"autogenerated": false,
"ratio": 3.560329067641682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46133457557416824,
"avg_score": null,
"num_lines": null
} |
# Apache Thrift Binary Protocol Struct 2.0 Writer in Python
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift import Thrift
class Trade:
    """Plain record for a single trade.

    Bug fix: the original __init__ assigned to bare local names
    (symbol="" etc.), which silently discarded the values and left new
    instances without any attributes. Assign to self so every instance
    starts with well-defined defaults.
    """
    def __init__(self):
        self.symbol = ""       # ticker symbol
        self.price = 0.0       # execution price
        self.size = 0          # number of shares
        self.timestamp = 0.0   # event time
# Write one Trade struct to the file "data" using the Thrift binary
# protocol over a file-backed transport.
trans = TTransport.TFileObjectTransport(open("data","wb"))
proto = TBinaryProtocol.TBinaryProtocol(trans)
# Populate the record to serialize.
trade = Trade()
trade.symbol = "GE"
trade.price = 27.25
trade.size = 1700
trade.timestamp = 9.5
# Manually emit the struct: Begin, one Begin/value/End triple per field,
# then Stop and End. The field ids (1-4) must match the reader's schema.
proto.writeStructBegin("Trade")
proto.writeFieldBegin("symbol", Thrift.TType.STRING, 1)
proto.writeString(trade.symbol)
proto.writeFieldEnd()
proto.writeFieldBegin("price", Thrift.TType.DOUBLE, 2)
proto.writeDouble(trade.price)
proto.writeFieldEnd()
proto.writeFieldBegin("size", Thrift.TType.I32, 3)
proto.writeI32(trade.size)
proto.writeFieldEnd()
proto.writeFieldBegin("timestamp", Thrift.TType.DOUBLE, 4)
proto.writeDouble(trade.timestamp)
proto.writeFieldEnd()
# Field stop marker terminates the field list.
proto.writeFieldStop()
proto.writeStructEnd()
print("Wrote Trade: %s %d @ %f tm: %f" %
      (trade.symbol, trade.size, trade.price, trade.timestamp))
| {
"repo_name": "RandyAbernethy/ThriftBook",
"path": "part2/protocols/bin_file_write.py",
"copies": "1",
"size": "1126",
"license": "apache-2.0",
"hash": 3228636701239496700,
"line_mean": 23.4782608696,
"line_max": 63,
"alpha_frac": 0.7317939609,
"autogenerated": false,
"ratio": 3.4968944099378882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47286883708378885,
"avg_score": null,
"num_lines": null
} |
# Apache Thrift Transport Exceptions in Python
import pickle
import sys
from thrift import Thrift
from thrift.transport import TTransport
class Trade:
    """Plain record for a single trade.

    Bug fix: the original __init__ assigned to bare local names
    (symbol="" etc.), which silently discarded the values and left new
    instances without any attributes. Assign to self so every instance
    starts with well-defined defaults.
    """
    def __init__(self):
        self.symbol = ""   # ticker symbol
        self.price = 0.0   # execution price
        self.size = 0      # number of shares
# Demonstrates Thrift's transport exception hierarchy: write a pickled
# Trade through a file transport, optionally raise a TTransportException
# (one cmd-line arg) or force a read error by opening write-only (two
# args), and show the ordering of except clauses from most to least
# specific.
try:
    trans = TTransport.TFileObjectTransport(open("data","wb"))
    trade = Trade()
    trade.symbol = "F"
    trade.price = 13.10
    trade.size = 2500
    trans.write(pickle.dumps(trade));
    trans.close()
    # One extra argument: raise an explicit transport exception.
    if len(sys.argv) == 2:
        raise TTransport.TTransportException(
            TTransport.TTransportException.NOT_OPEN, "cmd line ex")
    # Two or more arguments: reopen write-only so read() below fails.
    trans = TTransport.TFileObjectTransport(open("data",
                                            ("wb" if len(sys.argv) > 2 else "rb")))
    bstr = trans.read(128)
    trade = pickle.loads(bstr)
    print("Trade(%d): %s %d @ %f" % (len(bstr), trade.symbol,
                                     trade.size, trade.price))
except TTransport.TTransportException as tte:
    print("TTransportException(%d): %s" % (tte.type, tte))
except Thrift.TException as te:
    print("TException: %s" % te)
except Exception as e:
    print("Exception: %s %s" % (type(e), e))
except:
    print("BaseException: %s" % sys.exc_info()[0])
| {
"repo_name": "RandyAbernethy/ThriftBook",
"path": "part2/exceptions/trans_excep.py",
"copies": "1",
"size": "1237",
"license": "apache-2.0",
"hash": -5833475773840761000,
"line_mean": 28.4523809524,
"line_max": 75,
"alpha_frac": 0.5885206144,
"autogenerated": false,
"ratio": 3.5444126074498565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46329332218498565,
"avg_score": null,
"num_lines": null
} |
"""A package for creating Extract-Transform-Load (ETL) programs in Python.
The package contains a number of classes for filling fact tables
and dimensions (including snowflaked and slowly changing dimensions),
classes for extracting data from different sources, classes for defining
'steps' in an ETL flow, and convenient functions for often-needed ETL
functionality.
The package's modules are:
- datasources for access to different data sources
- tables for giving easy and abstracted access to dimension and fact tables
- parallel for parallelizing ETL operations
- JDBCConnectionWrapper and jythonmultiprocessing for support of Jython
- aggregators for aggregating data
- steps for defining steps in an ETL flow
- FIFODict for providing a dict with a limited size and where elements are
removed in first-in first-out order
"""
# Copyright (c) 2009-2014, Aalborg University (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy as pcopy
import types
from datetime import date, datetime
from queue import Queue
from sys import modules
from threading import Thread
from . import FIFODict
import collections
# Package metadata.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.2'
# Public API exported by "from pygrametl import *".
__all__ = ['project', 'copy', 'renamefromto', 'rename', 'renametofrom',
           'getint', 'getlong', 'getfloat', 'getstr', 'getstrippedstr',
           'getstrornullvalue', 'getbool', 'getdate', 'gettimestamp',
           'getvalue', 'getvalueor', 'setdefaults', 'rowfactory', 'endload',
           'today', 'now', 'ymdparser', 'ymdhmsparser', 'datereader',
           'datetimereader', 'datespan', 'toupper', 'tolower', 'keepasis',
           'getdefaulttargetconnection', 'ConnectionWrapper']
# Registry of created Dimension/FactTable instances; see endload().
_alltables = []
def project(atts, row, renaming={}):
    """Create a new dictionary with a subset of the attributes.

    Arguments:

    - atts is a sequence of attributes in row that should be copied to the
      new result row.
    - row is the original dictionary to copy data from.
    - renaming is a mapping of names such that for each k in atts,
      the following holds:

      - If k in renaming then result[k] = row[renaming[k]].
      - If k not in renaming then result[k] = row[k].
      - renaming defaults to {}
    """
    # renaming.get(att, att) falls back to the attribute's own name.
    return {att: row[renaming.get(att, att)] for att in atts}
def copy(row, **renaming):
    """Create a copy of a dictionary, but allow renamings.

    Arguments:

    - row the dictionary to copy
    - **renaming allows renamings to be specified in the form
      newname=oldname meaning that in the result, oldname will be
      renamed to newname. The key oldname must exist in the row argument,
      but it can be assigned to several newnames in the result as in
      x='repeated', y='repeated'.
    """
    if not renaming:
        return row.copy()
    remaining = row.copy()
    result = {}
    for newname, oldname in renaming.items():
        result[newname] = row[oldname]
        # Drop the renamed-from key; pop tolerates the same oldname being
        # used by several newnames (e.g. x='repeated', y='repeated').
        remaining.pop(oldname, None)
    result.update(remaining)
    return result
def renamefromto(row, renaming):
    """Rename keys in a dictionary (in place).

    For each (oldname, newname) in renaming.items(): rename row[oldname] to
    row[newname].

    Bug fix: a self-rename (oldname == newname) previously deleted the key
    right after setting it; such entries are now left untouched.
    """
    if not renaming:
        return row
    for old, new in renaming.items():
        if old == new:
            continue
        row[new] = row[old]
        del row[old]
rename = renamefromto # for backwards compatibility
def renametofrom(row, renaming):
    """Rename keys in a dictionary (in place).

    For each (newname, oldname) in renaming.items(): rename row[oldname] to
    row[newname].

    Bug fix: a self-rename (oldname == newname) previously deleted the key
    right after setting it; such entries are now left untouched.
    """
    if not renaming:
        return row
    for new, old in renaming.items():
        if old == new:
            continue
        row[new] = row[old]
        del row[old]
def getint(value, default=None):
    """getint(value[, default]) -> int(value) if possible, else default."""
    try:
        result = int(value)
    except Exception:
        result = default
    return result
def getlong(value, default=None):
    """getlong(value[, default]) -> long(value) if possible, else default."""
    # Python 3 has no separate long type; int covers it.
    try:
        result = int(value)
    except Exception:
        result = default
    return result
def getfloat(value, default=None):
    """getfloat(value[, default]) -> float(value) if possible, else default."""
    try:
        result = float(value)
    except Exception:
        result = default
    return result
def getstr(value, default=None):
    """getstr(value[, default]) -> str(value) if possible, else default."""
    try:
        result = str(value)
    except Exception:
        result = default
    return result
def getstrippedstr(value, default=None):
    """Convert given value to a string and use .strip() on the result.

    If the conversion fails, the given default value is returned.
    """
    try:
        return str(value).strip()
    except Exception:
        return default
def getstrornullvalue(value, nullvalue='None'):
    """Convert a given value different from None to a string.

    If the given value is None, nullvalue (default: 'None') is returned.
    """
    return nullvalue if value is None else str(value)
def getbool(value, default=None,
            truevalues=set(( True, 1, '1', 't', 'true', 'True' )),
            falsevalues=set((False, 0, '0', 'f', 'false', 'False'))):
    """Convert a given value to True, False, or a default value.

    If the given value is in the given truevalues, True is returned.
    If the given value is in the given falsevalues, False is returned.
    Otherwise, the default value is returned.
    """
    if value in truevalues:
        return True
    if value in falsevalues:
        return False
    return default
def getdate(targetconnection, ymdstr, default=None):
    """Convert a string of the form 'yyyy-MM-dd' to a Date object.

    The returned Date is in the given targetconnection's format.

    Arguments:

    - targetconnection: a ConnectionWrapper whose underlying module's
      Date format is used
    - ymdstr: the string to convert
    - default: The value to return if the conversion fails
    """
    try:
        year, month, day = (int(part) for part in ymdstr.split('-'))
        modref = targetconnection.getunderlyingmodule()
        return modref.Date(year, month, day)
    except Exception:
        return default
def gettimestamp(targetconnection, ymdhmsstr, default=None):
    """Converts a string of the form 'yyyy-MM-dd HH:mm:ss' to a Timestamp.

    The returned Timestamp is in the given targetconnection's format.

    Arguments:

    - targetconnection: a ConnectionWrapper whose underlying module's
      Timestamp format is used
    - ymdhmsstr: the string to convert
    - default: The value to return if the conversion fails
    """
    try:
        datepart, timepart = ymdhmsstr.strip().split(' ')
        year, month, day = (int(p) for p in datepart.split('-'))
        hour, minute, second = (int(p) for p in timepart.split(':'))
        modref = targetconnection.getunderlyingmodule()
        return modref.Timestamp(year, month, day, hour, minute, second)
    except Exception:
        return default
def getvalue(row, name, mapping={}):
    """If name in mapping, return row[mapping[name]], else return row[name]."""
    return row[mapping.get(name, name)]
def getvalueor(row, name, mapping={}, default=None):
    """Return the value of name from row using a mapping and a default value."""
    return row.get(mapping.get(name, name), default)
def setdefaults(row, attributes, defaults=None):
    """Set default values for attributes not present in a dictionary.

    Default values are set for "missing" values, existing values are not
    updated.

    Arguments:

    - row is the dictionary to set default values in
    - attributes is either
      A) a sequence of attribute names in which case defaults must
         be an equally long sequence of these attributes default values or
      B) a sequence of pairs of the form (attribute, defaultvalue) in
         which case the defaults argument should be None
    - defaults is a sequence of default values (see above)
    """
    if defaults:
        if len(defaults) != len(attributes):
            raise ValueError("Lists differ in length")
        pairs = zip(attributes, defaults)
    else:
        # attributes is already a sequence of (attribute, default) pairs.
        pairs = attributes
    for attribute, defaultvalue in pairs:
        row.setdefault(attribute, defaultvalue)
def rowfactory(source, names, close=True):
    """Generate dicts with key values from names and data values from source.

    The given source should provide either next() or fetchone() returning
    a tuple or fetchall() returning a sequence of tuples. For each tuple,
    a dict is constructed such that the i'th element in names maps to
    the i'th value in the tuple.

    If close=True (the default), close will be called on source after
    fetching all tuples.

    Bug fix: the next()/fetchone() branch previously yielded at most one
    row; it now loops until the source is exhausted, as documented.
    """
    # Prefer an iterator-style next(); fall back to DB-API fetchone().
    nextfunc = getattr(source, 'next', None)
    if nextfunc is None:
        nextfunc = getattr(source, 'fetchone', None)
    try:
        if nextfunc is not None:
            while True:
                try:
                    tmp = nextfunc()
                except (StopIteration, IndexError):
                    return
                if tmp is None:
                    return
                yield dict(zip(names, tmp))
        else:
            for row in source.fetchall():
                yield dict(zip(names, row))
    finally:
        if close:
            # Best-effort close; never let a close() failure mask the
            # real outcome (the old 'return' here could swallow in-flight
            # exceptions).
            try:
                source.close()
            except Exception:
                pass
def endload():
    """Signal to all Dimension and FactTable objects that all data is loaded."""
    for table in _alltables:
        method = getattr(table, 'endload', None)
        # Fix: isinstance(method, collections.Callable) breaks on
        # Python >= 3.10 where the deprecated collections.Callable alias
        # was removed; the builtin callable() is the portable test.
        if callable(method):
            method()
# Cache for today(); frozen on first call so a whole load uses one value.
_today = None

def today(ignoredtargetconn=None, ignoredrow=None, ignorednamemapping=None):
    """Return the date of the first call this method as a datetime.date object.
    """
    global _today
    if _today is None:
        _today = date.today()
    return _today
# Cache for now(); frozen on first call so a whole load uses one value.
_now = None

def now(ignoredtargetconn=None, ignoredrow=None, ignorednamemapping=None):
    """Return the time of the first call this method as a datetime.datetime.
    """
    global _now
    if _now is None:
        _now = datetime.now()
    return _now
def ymdparser(ymdstr):
    """Convert a string of the form 'yyyy-MM-dd' to a datetime.date.
    If the input is None, the return value is also None.
    """
    if ymdstr is None:
        return None
    year, month, day = (int(part) for part in ymdstr.split('-'))
    return date(year, month, day)
def ymdhmsparser(ymdhmsstr):
    """Convert a string 'yyyy-MM-dd HH:mm:ss' to a datetime.datetime.
    If the input is None, the return value is also None.
    """
    if ymdhmsstr is None:
        return None
    datepart, timepart = ymdhmsstr.strip().split(' ')
    fields = [int(p) for p in datepart.split('-')]
    fields += [int(p) for p in timepart.split(':')]
    return datetime(*fields)
def datereader(dateattribute, parsingfunction=ymdparser):
    """Return a function that converts a certain dict member to a datetime.date
    When setting, fromfinder for a tables.SlowlyChangingDimension, this
    method can be used for generating a function that picks the relevant
    dictionary member from each row and converts it.
    Arguments:
    - dateattribute: the attribute the generated function should read
    - parsingfunction: the parsing function that converts the string
      to a datetime.date
    """
    def readerfunction(targetconnection, row, namemapping=None):
        # Use None as the default instead of {} to avoid a shared mutable
        # default argument; behavior for callers is unchanged.
        if namemapping is None:
            namemapping = {}
        atttouse = (namemapping.get(dateattribute) or dateattribute)
        return parsingfunction(row[atttouse])  # a datetime.date
    return readerfunction
def datetimereader(datetimeattribute, parsingfunction=ymdhmsparser):
    """Return a function that converts a certain dict member to a datetime
    When setting, fromfinder for a tables.SlowlyChangingDimension, this
    method can be used for generating a function that picks the relevant
    dictionary member from each row and converts it.
    Arguments:
    - datetimeattribute: the attribute the generated function should read
    - parsingfunction: the parsing function that converts the string
      to a datetime.datetime
    """
    def readerfunction(targetconnection, row, namemapping=None):
        # Use None as the default instead of {} to avoid a shared mutable
        # default argument; behavior for callers is unchanged.
        if namemapping is None:
            namemapping = {}
        atttouse = (namemapping.get(datetimeattribute) or datetimeattribute)
        return parsingfunction(row[atttouse])  # a datetime.datetime
    return readerfunction
def datespan(fromdate, todate, fromdateincl=True, todateincl=True,
             key='dateid',
             strings={'date':'%Y-%m-%d', 'monthname':'%B', 'weekday':'%A'},
             ints={'year':'%Y', 'month':'%m', 'day':'%d'},
             expander=None):
    """Return a generator yielding dicts for all dates in an interval.
    Arguments:
    - fromdate: The lower bound for the date interval. Should be a
      datetime.date or a YYYY-MM-DD formatted string.
    - todate: The upper bound for the date interval. Should be a
      datetime.date or a YYYY-MM-DD formatted string.
    - fromdateincl: Decides if fromdate is included. Default: True
    - todateincl: Decides if todate is included. Default: True
    - key: The name of the attribute where an int (YYYYMMDD) that uniquely
      identifies the date is stored. Default: 'dateid'.
    - strings: A dict mapping attribute names to formatting directives (as
      those used by strftime). The returned dicts will have the specified
      attributes as strings.
      Default: {'date':'%Y-%m-%d', 'monthname':'%B', 'weekday':'%A'}
    - ints: A dict mapping attribute names to formatting directives (as
      those used by strftime). The returned dicts will have the specified
      attributes as ints.
      Default: {'year':'%Y', 'month':'%m', 'day':'%d'}
    - expander: A callable f(date, dict) that is invoked on each created
      dict. Not invoked if None. Default: None
    """
    # Note: the strings/ints defaults are mutable but are only read, never
    # mutated, so sharing them between calls is safe.
    for arg in (fromdate, todate):
        # BUG FIX: the old test was "type(arg) in str", which raises a
        # TypeError for every input; use isinstance instead.
        if not ((isinstance(arg, str) and arg.count('-') == 2)
                or isinstance(arg, date)):
            raise ValueError("fromdate and todate must be datetime.dates or "
                             "YYYY-MM-DD formatted strings")
    # BUG FIX: only parse string arguments; datetime.date instances were
    # previously passed to str.split and crashed despite being documented
    # as valid input.
    if isinstance(fromdate, str):
        (year, month, day) = fromdate.split('-')
        fromdate = date(int(year), int(month), int(day))
    if isinstance(todate, str):
        (year, month, day) = todate.split('-')
        todate = date(int(year), int(month), int(day))
    start = fromdate.toordinal()
    if not fromdateincl:
        start += 1
    end = todate.toordinal()
    if todateincl:
        end += 1
    for i in range(start, end):
        d = date.fromordinal(i)
        res = {key: int(d.strftime('%Y%m%d'))}
        # 'fmt' avoids shadowing the builtin 'format'.
        for (att, fmt) in strings.items():
            res[att] = d.strftime(fmt)
        for (att, fmt) in ints.items():
            res[att] = int(d.strftime(fmt))
        if expander is not None:
            expander(d, res)
        yield res
def toupper(s):
    """Return s converted to upper case."""
    return s.upper()
def tolower(s):
    """Return s converted to lower case."""
    return s.lower()
def keepasis(s):
    """Return s unchanged."""
    return s
# The ConnectionWrapper used when no explicit target connection is given.
_defaulttargetconnection = None
def getdefaulttargetconnection():
    """Return the ConnectionWrapper currently set as the default (or None)."""
    return _defaulttargetconnection
class ConnectionWrapper(object):
    """Provide a uniform representation of different database connection types.
    All Dimensions and FactTables communicate with the data warehouse using
    a ConnectionWrapper. In this way, the code for loading the DW does not
    have to care about which parameter format is used.
    pygrametl's code uses the 'pyformat' but the ConnectionWrapper performs
    translations of the SQL to use 'named', 'qmark', 'format', or 'numeric'
    if the user's database connection needs this. Note that the
    translations are simple and naive. Escaping as in %%(name)s is not
    taken into consideration. These simple translations are enough for
    pygrametl's code which is the important thing here; we're not trying to
    make a generic, all-purpose tool to get rid of the problems with
    different parameter formats. It is, however, possible to disable the
    translation of a statement to execute such that 'problematic'
    statements can be executed anyway.
    """
    def __init__(self, connection, stmtcachesize=1000, paramstyle=None):
        """Create a ConnectionWrapper around the given PEP 249 connection
        If no default ConnectionWrapper already exists, the new
        ConnectionWrapper is set as the default.
        Arguments:
        - connection: An open PEP 249 connection to the database
        - stmtcachesize: A number deciding how many translated statements to
          cache. A statement needs to be translated when the connection
          does not use 'pyformat' to specify parameters. When 'pyformat' is
          used, stmtcachesize is ignored as no statements need to be
          translated.
        - paramstyle: A string holding the name of the PEP 249 connection's
          paramstyle. If None, pygrametl will try to find the paramstyle
          automatically (an AttributeError can be raised if that fails).
        """
        self.__connection = connection
        self.__cursor = connection.cursor()
        # Hook allowing callers to rewrite column names from cursor
        # descriptions; the default is the identity function.
        self.nametranslator = lambda s: s
        if paramstyle is None:
            # PEP 249 requires driver modules to expose 'paramstyle'; look
            # it up on the module defining the connection's class, falling
            # back to parent packages for drivers that nest their classes.
            try:
                paramstyle = \
                    modules[self.__connection.__class__.__module__].paramstyle
            except AttributeError:
                # Note: This is probably a better way to do this, but to avoid
                # to break anything that worked before this fix, we only do it
                # this way if the first approach didn't work
                try:
                    paramstyle = \
                        modules[self.__connection.__class__.__module__.\
                        split('.')[0]].paramstyle
                except AttributeError:
                    # To support, e.g., mysql.connector connections
                    paramstyle = \
                        modules[self.__connection.__class__.__module__.\
                        rsplit('.', 1)[0]].paramstyle
        if not paramstyle == 'pyformat':
            # Statements must be rewritten for this driver; keep a bounded
            # FIFO cache of the translations.
            self.__translations = FIFODict.FIFODict(stmtcachesize)
            try:
                self.__translate = getattr(self, '_translate2' + paramstyle)
            except AttributeError:
                raise InterfaceError("The paramstyle '%s' is not supported" %\
                    paramstyle)
        else:
            self.__translate = None
        global _defaulttargetconnection
        if _defaulttargetconnection is None:
            _defaulttargetconnection = self
    def execute(self, stmt, arguments=None, namemapping=None, translate=True):
        """Execute a statement.
        Arguments:
        - stmt: the statement to execute
        - arguments: a mapping with the arguments (default: None)
        - namemapping: a mapping of names such that if stmt uses %(arg)s
          and namemapping[arg]=arg2, the value arguments[arg2] is used
          instead of arguments[arg]
        - translate: decides if translation from 'pyformat' to the
          underlying connection's format should take place. Default: True
        """
        if namemapping and arguments:
            arguments = copy(arguments, **namemapping)
        if self.__translate and translate:
            (stmt, arguments) = self.__translate(stmt, arguments)
        self.__cursor.execute(stmt, arguments)
    def executemany(self, stmt, params, translate=True):
        """Execute a sequence of statements."""
        if self.__translate and translate:
            # Idea: Translate the statement for the first parameter set. Then
            # reuse the statement (but create new attribute sequences if needed)
            # for the remaining paramter sets
            newstmt = self.__translate(stmt, params[0])[0]
            if type(self.__translations[stmt]) == str:
                # The paramstyle is 'named' in this case and we don't have to
                # put parameters into sequences
                self.__cursor.executemany(newstmt, params)
            else:
                # We need to extract attributes and put them into sequences
                names = self.__translations[stmt][1] # The attributes to extract
                newparams = [[p[n] for n in names] for p in params]
                self.__cursor.executemany(newstmt, newparams)
        else:
            # for pyformat when no translation is necessary
            self.__cursor.executemany(stmt, params)
    def _translate2named(self, stmt, row=None):
        # Translate %(name)s to :name. No need to change row.
        # Cache only the translated SQL.
        res = self.__translations.get(stmt, None)
        if res:
            return (res, row)
        res = stmt
        while True:
            start = res.find('%(')
            if start == -1:
                break
            end = res.find(')s', start)
            if end == -1:
                break
            name = res[start+2 : end]
            res = res.replace(res[start:end+2], ':' + name)
        self.__translations[stmt] = res
        return (res, row)
    def _translate2qmark(self, stmt, row=None):
        # Translate %(name)s to ? and build a list of attributes to extract
        # from row. Cache both.
        (newstmt, names) = self.__translations.get(stmt, (None, None))
        if newstmt:
            return (newstmt, [row[n] for n in names])
        names = []
        newstmt = stmt
        while True:
            start = newstmt.find('%(')
            if start == -1:
                break
            end = newstmt.find(')s', start)
            if end == -1:
                break
            name = newstmt[start+2 : end]
            names.append(name)
            newstmt = newstmt.replace(newstmt[start:end+2], '?',1)#Replace once!
        self.__translations[stmt] = (newstmt, names)
        return (newstmt, [row[n] for n in names])
    def _translate2numeric(self, stmt, row=None):
        # Translate %(name)s to 1,2,... and build a list of attributes to
        # extract from row. Cache both.
        (newstmt, names) = self.__translations.get(stmt, (None, None))
        if newstmt:
            return (newstmt, [row[n] for n in names])
        names = []
        cnt = 0
        newstmt = stmt
        while True:
            start = newstmt.find('%(')
            if start == -1:
                break
            end = newstmt.find(')s', start)
            if end == -1:
                break
            name = newstmt[start+2 : end]
            names.append(name)
            newstmt = newstmt.replace(newstmt[start:end+2], ':' + str(cnt))
            cnt += 1
        self.__translations[stmt] = (newstmt, names)
        return (newstmt, [row[n] for n in names])
    def _translate2format(self, stmt, row=None):
        # Translate %(name)s to %s and build a list of attributes to extract
        # from row. Cache both.
        (newstmt, names) = self.__translations.get(stmt, (None, None))
        if newstmt:
            return (newstmt, [row[n] for n in names])
        names = []
        newstmt = stmt
        while True:
            start = newstmt.find('%(')
            if start == -1:
                break
            end = newstmt.find(')s', start)
            if end == -1:
                break
            name = newstmt[start+2 : end]
            names.append(name)
            newstmt = newstmt.replace(newstmt[start:end+2],'%s',1)#Replace once!
        self.__translations[stmt] = (newstmt, names)
        return (newstmt, [row[n] for n in names])
    def rowfactory(self, names=None):
        """Return a generator object returning result rows (i.e. dicts)."""
        # Hand the current cursor over to the (module-level) rowfactory
        # generator and create a fresh cursor for subsequent statements.
        rows = self.__cursor
        self.__cursor = self.__connection.cursor()
        if names is None:
            if rows.description is None: # no query was executed ...
                return (nothing for nothing in []) # a generator with no rows
            else:
                names = [self.nametranslator(t[0]) for t in rows.description]
        return rowfactory(rows, names, True)
    def fetchone(self, names=None):
        """Return one result row (i.e. dict)."""
        if self.__cursor.description is None:
            return {}
        if names is None:
            names = [self.nametranslator(t[0]) \
                         for t in self.__cursor.description]
        values = self.__cursor.fetchone()
        if values is None:
            return dict([(n, None) for n in names])#A row with each att = None
        else:
            return dict(list(zip(names, values)))
    def fetchonetuple(self):
        """Return one result tuple."""
        if self.__cursor.description is None:
            return ()
        values = self.__cursor.fetchone()
        if values is None:
            return (None, ) * len(self.__cursor.description)
        else:
            return values
    def fetchmanytuples(self, cnt):
        """Return cnt result tuples."""
        if self.__cursor.description is None:
            return []
        return self.__cursor.fetchmany(cnt)
    def fetchalltuples(self):
        """Return all result tuples"""
        if self.__cursor.description is None:
            return []
        return self.__cursor.fetchall()
    def rowcount(self):
        """Return the size of the result."""
        return self.__cursor.rowcount
    def getunderlyingmodule(self):
        """Return a reference to the underlying connection's module."""
        return modules[self.__connection.__class__.__module__]
    def commit(self):
        """Commit the transaction."""
        # Let all tables flush pending data before the DB commit.
        endload()
        self.__connection.commit()
    def close(self):
        """Close the connection to the database."""
        self.__connection.close()
    def rollback(self):
        """Rollback the transaction."""
        self.__connection.rollback()
    def setasdefault(self):
        """Set this ConnectionWrapper as the default connection."""
        global _defaulttargetconnection
        _defaulttargetconnection = self
    def cursor(self):
        """Return a cursor object. Optional method."""
        return self.__connection.cursor()
    def resultnames(self):
        """Return the column names of the current result (or None)."""
        if self.__cursor.description is None:
            return None
        else:
            return tuple([t[0] for t in self.__cursor.description])
    def __getstate__(self):
        # In case the ConnectionWrapper is pickled (to be sent to another
        # process), we need to create a new cursor when it is unpickled.
        res = self.__dict__.copy()
        del res['_ConnectionWrapper__cursor'] # a dirty trick, but...
        return res
    def __setstate__(self, dict):
        self.__dict__.update(dict)
        self.__cursor = self.__connection.cursor()
class BackgroundConnectionWrapper(object):
    """An alternative implementation of the ConnectionWrapper for experiments.
    This implementation communicates with the database by using a
    separate thread.
    It is likely better to use ConnectionWrapper or a shared
    ConnectionWrapper (see pygrametl.parallel).
    This class offers the same methods as ConnectionWrapper. The
    documentation is not repeated here.
    """
    # Operation tags for the work queue consumed by the background thread.
    _SINGLE = 1
    _MANY = 2
    # Most of this class' code was just copied from ConnectionWrapper
    # as we just want to do experiments with this class.
    def __init__(self, connection, stmtcachesize=1000, paramstyle=None):
        self.__connection = connection
        self.__cursor = connection.cursor()
        self.nametranslator = lambda s: s
        if paramstyle is None:
            # PEP 249 requires driver modules to expose 'paramstyle'; fall
            # back to parent packages for drivers that nest their classes.
            try:
                paramstyle = \
                    modules[self.__connection.__class__.__module__].paramstyle
            except AttributeError:
                try:
                    paramstyle = \
                        modules[self.__connection.__class__.__module__.\
                        split('.')[0]].paramstyle
                except AttributeError:
                    # To support, e.g., mysql.connector connections
                    paramstyle = \
                        modules[self.__connection.__class__.__module__.\
                        rsplit('.', 1)[0]].paramstyle
        if not paramstyle == 'pyformat':
            self.__translations = FIFODict.FIFODict(stmtcachesize)
            try:
                self.__translate = getattr(self, '_translate2' + paramstyle)
            except AttributeError:
                raise InterfaceError("The paramstyle '%s' is not supported" %\
                    paramstyle)
        else:
            self.__translate = None
        # Thread-stuff. A daemon thread consumes (op, cursor, stmt, args)
        # tuples from the queue and runs them on the shared cursor.
        # (The redundant second "self.__cursor = connection.cursor()" from
        # the original was removed; the cursor is already created above.)
        self.__queue = Queue(5000)
        t = Thread(target=self.__worker)
        t.daemon = True
        t.start()
    def execute(self, stmt, arguments=None, namemapping=None, translate=True):
        """Enqueue a statement for execution by the background thread."""
        if namemapping and arguments:
            arguments = copy(arguments, **namemapping)
        if self.__translate and translate:
            (stmt, arguments) = self.__translate(stmt, arguments)
        self.__queue.put((self._SINGLE, self.__cursor, stmt, arguments))
    def executemany(self, stmt, params, translate=True):
        """Enqueue a sequence of statements for the background thread."""
        if self.__translate and translate:
            # Idea: Translate the statement for the first parameter set. Then
            # reuse the statement (but create new attribute sequences if
            # needed) for the remaining parameter sets.
            newstmt = self.__translate(stmt, params[0])[0]
            if type(self.__translations[stmt]) == str:
                # The paramstyle is 'named' in this case and we don't have to
                # put parameters into sequences
                self.__queue.put((self._MANY, self.__cursor, newstmt, params))
            else:
                # We need to extract attributes and put them into sequences
                names = self.__translations[stmt][1] # The attrs to extract
                newparams = [[p[n] for n in names] for p in params]
                self.__queue.put((self._MANY,self.__cursor, newstmt, newparams))
        else:
            # for pyformat when no translation is necessary
            self.__queue.put((self._MANY, self.__cursor, stmt, params))
    def _translate2named(self, stmt, row=None):
        # Translate %(name)s to :name. No need to change row.
        # Cache only the translated SQL.
        res = self.__translations.get(stmt, None)
        if res:
            return (res, row)
        res = stmt
        while True:
            start = res.find('%(')
            if start == -1:
                break
            end = res.find(')s', start)
            if end == -1:
                # CONSISTENCY FIX: ConnectionWrapper guards against an
                # unterminated '%(' here; this copy was missing the check.
                break
            name = res[start+2 : end]
            res = res.replace(res[start:end+2], ':' + name)
        self.__translations[stmt] = res
        return (res, row)
    def _translate2qmark(self, stmt, row=None):
        # Translate %(name)s to ? and build a list of attributes to extract
        # from row. Cache both.
        (newstmt, names) = self.__translations.get(stmt, (None, None))
        if newstmt:
            return (newstmt, [row[n] for n in names])
        names = []
        newstmt = stmt
        while True:
            start = newstmt.find('%(')
            if start == -1:
                break
            end = newstmt.find(')s', start)
            if end == -1:
                # CONSISTENCY FIX: missing guard (see _translate2named).
                break
            name = newstmt[start+2 : end]
            names.append(name)
            newstmt = newstmt.replace(newstmt[start:end+2], '?',1)#Replace once!
        self.__translations[stmt] = (newstmt, names)
        return (newstmt, [row[n] for n in names])
    def _translate2numeric(self, stmt, row=None):
        # Translate %(name)s to 1,2,... and build a list of attributes to
        # extract from row. Cache both.
        (newstmt, names) = self.__translations.get(stmt, (None, None))
        if newstmt:
            return (newstmt, [row[n] for n in names])
        names = []
        cnt = 0
        newstmt = stmt
        while True:
            start = newstmt.find('%(')
            if start == -1:
                break
            end = newstmt.find(')s', start)
            if end == -1:
                # CONSISTENCY FIX: missing guard (see _translate2named).
                break
            name = newstmt[start+2 : end]
            names.append(name)
            newstmt = newstmt.replace(newstmt[start:end+2], ':' + str(cnt))
            cnt += 1
        self.__translations[stmt] = (newstmt, names)
        return (newstmt, [row[n] for n in names])
    def _translate2format(self, stmt, row=None):
        # Translate %(name)s to %s and build a list of attributes to extract
        # from row. Cache both.
        (newstmt, names) = self.__translations.get(stmt, (None, None))
        if newstmt:
            return (newstmt, [row[n] for n in names])
        names = []
        newstmt = stmt
        while True:
            start = newstmt.find('%(')
            if start == -1:
                break
            end = newstmt.find(')s', start)
            if end == -1:
                # CONSISTENCY FIX: missing guard (see _translate2named).
                break
            name = newstmt[start+2 : end]
            names.append(name)
            newstmt = newstmt.replace(newstmt[start:end+2],'%s',1)#Replace once!
        self.__translations[stmt] = (newstmt, names)
        return (newstmt, [row[n] for n in names])
    def rowfactory(self, names=None):
        """Return a generator of result rows, after draining the queue."""
        self.__queue.join()
        rows = self.__cursor
        self.__cursor = self.__connection.cursor()
        if names is None:
            if rows.description is None: # no query was executed ...
                return (nothing for nothing in []) # a generator with no rows
            else:
                names = [self.nametranslator(t[0]) for t in rows.description]
        return rowfactory(rows, names, True)
    def fetchone(self, names=None):
        """Return one result row (i.e. dict), after draining the queue."""
        self.__queue.join()
        if self.__cursor.description is None:
            return {}
        if names is None:
            names = [self.nametranslator(t[0]) \
                         for t in self.__cursor.description]
        values = self.__cursor.fetchone()
        if values is None:
            return dict([(n, None) for n in names])#A row with each att = None
        else:
            return dict(list(zip(names, values)))
    def fetchonetuple(self):
        """Return one result tuple, after draining the queue."""
        self.__queue.join()
        if self.__cursor.description is None:
            return ()
        values = self.__cursor.fetchone()
        if values is None:
            return (None, ) * len(self.__cursor.description)
        else:
            return values
    def fetchmanytuples(self, cnt):
        """Return cnt result tuples, after draining the queue."""
        self.__queue.join()
        if self.__cursor.description is None:
            return []
        return self.__cursor.fetchmany(cnt)
    def fetchalltuples(self):
        """Return all result tuples, after draining the queue."""
        self.__queue.join()
        if self.__cursor.description is None:
            return []
        return self.__cursor.fetchall()
    def rowcount(self):
        """Return the size of the result, after draining the queue."""
        self.__queue.join()
        return self.__cursor.rowcount
    def getunderlyingmodule(self):
        """Return a reference to the underlying connection's module."""
        # No need to join the queue here
        return modules[self.__connection.__class__.__module__]
    def commit(self):
        """Commit the transaction once all queued work is done."""
        endload()
        self.__queue.join()
        self.__connection.commit()
    def close(self):
        """Close the connection once all queued work is done."""
        self.__queue.join()
        self.__connection.close()
    def rollback(self):
        """Rollback the transaction once all queued work is done."""
        self.__queue.join()
        self.__connection.rollback()
    def setasdefault(self):
        """Set this wrapper as the default connection."""
        global _defaulttargetconnection
        _defaulttargetconnection = self
    def cursor(self):
        """Return a cursor object, after draining the queue."""
        self.__queue.join()
        return self.__connection.cursor()
    def resultnames(self):
        """Return the column names of the current result (or None)."""
        self.__queue.join()
        if self.__cursor.description is None:
            return None
        else:
            return tuple([t[0] for t in self.__cursor.description])
    def __getstate__(self):
        # In case the wrapper is pickled (to be sent to another process),
        # we need to create a new cursor when it is unpickled.
        res = self.__dict__.copy()
        # BUG FIX: the original deleted '_ConnectionWrapper__cursor' (the
        # wrong name mangling, copied from ConnectionWrapper) which raised
        # a KeyError, and it never returned the state dict.
        del res['_BackgroundConnectionWrapper__cursor']
        return res
    def __setstate__(self, dict):
        self.__dict__.update(dict)
        self.__cursor = self.__connection.cursor()
    def __worker(self):
        # Runs forever in the daemon thread, executing queued operations.
        while True:
            (op, curs, stmt, args) = self.__queue.get()
            if op == self._SINGLE:
                curs.execute(stmt, args)
            elif op == self._MANY:
                curs.executemany(stmt, args)
            self.__queue.task_done()
class Error(Exception):
    """Base class for all pygrametl exceptions."""
    pass
class InterfaceError(Error):
    """Raised when an unsupported database interface (paramstyle) is used."""
    pass
| {
"repo_name": "haleemur/pygrametl-python3",
"path": "pygrametl/__init__.py",
"copies": "1",
"size": "38806",
"license": "bsd-2-clause",
"hash": 7515528065578614000,
"line_mean": 35.8878326996,
"line_max": 80,
"alpha_frac": 0.5969180024,
"autogenerated": false,
"ratio": 4.3305434661310125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004317833986896967,
"num_lines": 1052
} |
"""A package for managing OpenBSD's Packet Filter."""
__copyright__ = """
Copyright (c) 2008-2019, Daniele Mazzocchio
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the developer nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
__author__ = "Daniele Mazzocchio <danix@kernel-panic.it>"
__version__ = "0.2.1"
__OBSD_VERSION__ = "6.5"
from pf.exceptions import PFError
from pf.constants import *
from pf.status import *
from pf.state import *
from pf.table import *
from pf.rule import *
from pf.queue import *
from pf.filter import *
import pf.lib
# Names exported by "from pf import *".
__all__ = ['PFError',
           'PFStatus',
           'PFIface',
           'PFUid',
           'PFGid',
           'PFAddr',
           'PFPort',
           'PFRuleAddr',
           'PFPool',
           'PFRule',
           'PFRuleset',
           'PFStatePeer',
           'PFStateKey',
           'PFState',
           'PFTableAddr',
           'PFTable',
           'PFTStats',
           'ServiceCurve',
           'PFQueue',
           'PacketFilter']
# Also re-export every public name defined in pf.constants.
import pf.constants
# NOTE(review): os._get_exports_list is a private CPython helper (it lists
# a module's names not starting with '_') — confirm it exists on all
# supported Python versions before relying on it.
__all__.extend(os._get_exports_list(pf.constants))
del pf.constants
| {
"repo_name": "dotpy/py-pf",
"path": "pf/__init__.py",
"copies": "1",
"size": "2483",
"license": "bsd-3-clause",
"hash": -8542995435415514000,
"line_mean": 31.6710526316,
"line_max": 80,
"alpha_frac": 0.6951268627,
"autogenerated": false,
"ratio": 4.237201365187714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006430796646514861,
"num_lines": 76
} |
"""A package (for Nevow) for defining the schema, validation and rendering of
HTML forms.
"""
version_info = (0, 12, 0)
version = '.'.join([str(i) for i in version_info])
from nevow import static
from formal.types import *
from formal.validation import *
from formal.widget import *
from formal.widgets.restwidget import *
from formal.widgets.multiselect import *
from formal.widgets.richtextarea import *
from formal.form import Form, Field, Group, ResourceMixin, renderForm
from formal import iformal
def widgetFactory(widgetClass, *a, **k):
    """Return a factory callable that instantiates widgetClass.
    The returned callable takes the original item and builds
    widgetClass(original, *a, **k), with a and k captured here.
    """
    def factory(original):
        return widgetClass(original, *a, **k)
    return factory
# Locate the static CSS/JS resources. Prefer pkg_resources (setuptools) so
# the files are found even in zipped installs; fall back to paths relative
# to this module when pkg_resources is unavailable.
try:
    import pkg_resources
except ImportError:
    import os.path
    defaultCSS = static.File(os.path.join(os.path.split(__file__)[0], 'formal.css'))
    formsJS = static.File(os.path.join(os.path.split(__file__)[0], 'js'))
else:
    # LazyResource defers the pkg_resources lookup until first access.
    from formal.util import LazyResource
    defaultCSS = LazyResource(lambda: static.File(pkg_resources.resource_filename('formal', 'formal.css')))
    formsJS = LazyResource(lambda: static.File(pkg_resources.resource_filename('formal', 'js')))
    del LazyResource
# Register standard adapters
from twisted.python.components import registerAdapter
from formal import converters
from formal.util import SequenceKeyLabelAdapter
# Default widget for each schema type.
registerAdapter(TextInput, String, iformal.IWidget)
registerAdapter(TextInput, Integer, iformal.IWidget)
registerAdapter(TextInput, Float, iformal.IWidget)
registerAdapter(Checkbox, Boolean, iformal.IWidget)
registerAdapter(DatePartsInput, Date, iformal.IWidget)
registerAdapter(TextInput, Time, iformal.IWidget)
registerAdapter(FileUploadRaw, File, iformal.IWidget)
registerAdapter(TextAreaList, Sequence, iformal.IWidget)
# (key, label) tuples adapt to both IKey and ILabel.
registerAdapter(SequenceKeyLabelAdapter, tuple, iformal.IKey)
registerAdapter(SequenceKeyLabelAdapter, tuple, iformal.ILabel)
# Converters between schema types and their string/tuple representations.
registerAdapter(converters.NullConverter, String, iformal.IStringConvertible)
registerAdapter(converters.DateToDateTupleConverter, Date, iformal.IDateTupleConvertible)
registerAdapter(converters.BooleanToStringConverter, Boolean, iformal.IBooleanConvertible)
registerAdapter(converters.BooleanToStringConverter, Boolean, iformal.IStringConvertible)
registerAdapter(converters.IntegerToStringConverter, Integer, iformal.IStringConvertible)
registerAdapter(converters.FloatToStringConverter, Float, iformal.IStringConvertible)
registerAdapter(converters.DateToStringConverter, Date, iformal.IStringConvertible)
registerAdapter(converters.TimeToStringConverter, Time, iformal.IStringConvertible)
registerAdapter(converters.NullConverter, File, iformal.IFileConvertible)
registerAdapter(converters.NullConverter, Sequence, iformal.ISequenceConvertible)
registerAdapter(converters.SequenceToStringConverter, Sequence, iformal.IStringConvertible)
# Decimal support is optional; register its adapters only when the Decimal
# schema type was exported by formal.types (i.e. the name exists).
try:
    Decimal
except NameError:
    pass
else:
    registerAdapter(TextInput, Decimal, iformal.IWidget)
    registerAdapter(converters.DecimalToStringConverter, Decimal, iformal.IStringConvertible)
del SequenceKeyLabelAdapter
del registerAdapter
| {
"repo_name": "emgee/formal",
"path": "formal/__init__.py",
"copies": "1",
"size": "3033",
"license": "mit",
"hash": -8349110148061537000,
"line_mean": 42.3285714286,
"line_max": 107,
"alpha_frac": 0.8143752061,
"autogenerated": false,
"ratio": 3.8103015075376883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010148503921647891,
"num_lines": 70
} |
# A package manager meant for Pythonista, built on StaSh.
import requests
import sys
import argparse
from os import remove, mkdir, rename, listdir, getcwd
from shutil import rmtree
cwd = getcwd()
# Locate the Pythonista "Documents" sandbox root: everything latte installs
# goes under this directory. Assumes the script runs somewhere below a
# path segment named "Documents" (index() raises ValueError otherwise).
documentsIndex = cwd.index("Documents")
documentsIndex += len("Documents")
ROOT = cwd[:documentsIndex]
class stansi: # Collection of Stash's ANSI escape codes.
    # Text attributes.
    bold = u"\x9b1m"
    underscore = u"\x9b4m"
    attr_end = u"\x9b0m"
    # Foreground colors (fore_end resets to the default foreground).
    fore_red = u"\x9b31m"
    fore_green = u"\x9b32m"
    fore_brown = u"\x9b33m"
    fore_blue = u"\x9b34m"
    fore_pink = u"\x9b35m"
    fore_cyan = u"\x9b36m"
    fore_white = u"\x9b37m"
    fore_end = u"\x9b39m"
    # Background colors (back_end resets to the default background).
    back_red = u"\x9b41m"
    back_green = u"\x9b42m"
    back_brown = u"\x9b43m"
    back_blue = u"\x9b44m"
    back_pink = u"\x9b45m"
    back_cyan = u"\x9b46m"
    back_white = u"\x9b47m"
    back_end = u"\x9b49m"
def Red(text):
    """Return text wrapped in red foreground escape codes."""
    return "%s%s%s" % (stansi.fore_red, text, stansi.fore_end)
def Blue(text):
    """Return text wrapped in blue foreground escape codes."""
    return "%s%s%s" % (stansi.fore_blue, text, stansi.fore_end)
def Green(text):
    """Return text wrapped in green foreground escape codes."""
    return "%s%s%s" % (stansi.fore_green, text, stansi.fore_end)
def Cyan(text):
    """Return text wrapped in cyan foreground escape codes."""
    return "%s%s%s" % (stansi.fore_cyan, text, stansi.fore_end)
class SWConfig (object): # Parser for the config files such as the repository listing.
    """Parse simple 'key=value' configuration text into a dict-like object."""
    def __init__(self, content):
        self.data = {}
        for line in content.splitlines():
            # Skip blank lines; previously they raised an IndexError.
            if not line.strip():
                continue
            # BUG FIX: split on the first '=' only. The old
            # line.split("=")[1] truncated values containing '='
            # (e.g. URLs with query strings) to their first segment.
            key, _, value = line.partition("=")
            self.data[key] = value
    def __getitem__(self, key):
        """Return the value stored for key (raises KeyError if absent)."""
        return self.data[key]
    def keys(self):
        """Return the configured key names."""
        return self.data.keys()
def download_package(url, package_name): # Handles the installation of packages directories (since they're no longer tarfiles)
    """Download a package's files from a repository into ROOT/package_name.
    Fetches 'bin.py' and 'meta.latte' from url/package_name/ over HTTP and
    writes them into a newly created directory under ROOT.
    Exits the process if the server answers with a 404 body.
    NOTE(review): the package directory is created before the existence
    check, so a failed download leaves an empty ROOT/package_name behind —
    confirm whether callers clean this up.
    """
    content_listing = ["bin.py", "meta.latte"]
    mkdir(ROOT+"/"+package_name)
    for item in content_listing:
        requested = requests.get(url+"/"+package_name+"/"+item)
        content = requested.text
        requested.close()
        # GitHub raw URLs return this literal body for missing files.
        if content == "404: Not Found\n":
            print(Red("ERROR") + ": Package not found.")
            sys.exit()
        opened = open(ROOT+"/"+package_name+"/"+item, "w")
        opened.write(content)
        opened.close()
def main(sargs):
    """Entry point for the latte package manager CLI.
    Arguments:
    - sargs: command line arguments (e.g. sys.argv[1:]); the first is the
      method (install, remove, update, new, add-repo, list-repos) and the
      second the package/repository name the method applies to.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("method", help="What action to perform (install, remove, etc)", type=str)
    parser.add_argument("package", help="Name of package", type=str)
    args = parser.parse_args(sargs)
    # Make sure the repository listing exists; rebuild the default if not.
    try:
        opened = open(".latte-repos.swconf", "r")
        opened.close()
    except OSError:
        opened = open(".latte-repos.swconf", "w")
        print(Red("WARNING") + ": Repository listing doesn't exist, rebuilding to default...")
        opened.write("universe=https://raw.githubusercontent.com/Seanld/latte-universe/master")
        opened.close()
    repo_listing_opened = open(".latte-repos.swconf", "r")
    listing_content = repo_listing_opened.read()
    repo_listing_opened.close()
    REPOSITORIES = SWConfig(listing_content)
    if args.method == "install":
        # "repo/package" selects a repository; a bare name uses universe.
        packageSplitted = args.package.split("/")
        try:
            package_name = packageSplitted[1]
            repo_to_use = REPOSITORIES[packageSplitted[0]]
        except IndexError:
            repo_to_use = REPOSITORIES["universe"]
            package_name = packageSplitted[0]
            print(Red("WARNING") + ": No repository specified, using universe by default...")
        try:
            download_package(repo_to_use, package_name)
        except Exception:
            # BUG FIX: this branch called an undefined helper (stoutput),
            # which raised a NameError instead of reporting the failure.
            # Catching Exception (not a bare except) also lets the
            # SystemExit raised by download_package propagate normally.
            print(Red("ERROR") + ": Couldn't find package.")
            sys.exit()
        # Move downloaded files to their final locations.
        print("Installing")
        try:
            rename(ROOT+"/"+package_name+"/meta.latte", ROOT+"/stash_extensions/latte/"+package_name+".latte")
        except OSError:
            # The latte extension directory does not exist yet; create it.
            mkdir(ROOT+"/stash_extensions/latte")
            rename(ROOT+"/"+package_name+"/meta.latte", ROOT+"/stash_extensions/latte/"+package_name+".latte")
        rename(ROOT+"/"+package_name+"/bin.py", ROOT+"/stash_extensions/bin/"+package_name+".py")
        rmtree(ROOT+"/"+package_name)
        print(Green("SUCCESS") + ": Package '"+package_name+"' successfully installed!")
    elif args.method == "remove":
        try:
            remove(ROOT+"/stash_extensions/bin/"+args.package+".py")
            remove(ROOT+"/stash_extensions/latte/"+args.package+".latte")
        except OSError:
            print(Red("ERROR") + ": Couldn't remove package; not found in resources.")
            sys.exit()
        print(Green("SUCCESS") + ": '"+args.package+"' removed!")
    elif args.method == "update":
        print("Jeez! Sorry, but we are currently working on self-update capabilities. For now, just redo the install process to update.")
    elif args.method == "new":
        # Generate a skeleton package in the current working directory.
        try:
            mkdir(args.package)
            config = open(args.package+"/meta.latte", "w")
            config.write("developer=Your name here\ndescription=Enter description of your app here\nversion=0.1")
            config.close()
            index = open(args.package+"/bin.py", "w")
            index.write("# This is just an example template. You can change this all you like.\n\nimport sys\nimport argparse\n\ndef main(sargs):\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('echo', help='What you want the command to echo back.')\n\targs = parser.parse_args(sargs)\n\t\n\tprint('Echoing back: '+args.echo)\n\nif __name__ == '__main__':\n\tmain(sys.argv[1:])")
            index.close()
            print(Green("SUCCESS") + ": Package '"+args.package+"' generated, check current working directory!")
        except OSError:
            print(Red("ERROR") + ": Couldn't generate package; directory may already exist.")
    elif args.method == "add-repo":
        # A repository advertises its nickname via an init.latte file.
        try:
            request = requests.get(args.package+"/init.latte")
            data = request.text
            request.close()
            data_org = SWConfig(data)
            nickname = data_org["NICKNAME"]
            repo_listing = open(".latte-repos.swconf", "a")
            repo_listing.write("\n"+nickname+"="+args.package)
            repo_listing.close()
            print(Green("SUCCESS") + ": '"+nickname+"' added to repositories!")
        except Exception:
            print(Red("ERROR") + ": Either repository doesn't exist, or does not contain an 'init.latte' file.")
    elif args.method == "list-repos":
        if args.package == "all":
            opened = open(".latte-repos.swconf")
            content = opened.read()
            opened.close()
            as_config = SWConfig(content)
            for repo in as_config.keys():
                print(Cyan(repo) + ": " + Green(as_config[repo]))
    else:
        print(Red("ERROR") + ": Unknown command '"+args.method+"'!")
if __name__ == "__main__":
main(sys.argv[1:])
| {
"repo_name": "cclauss/stash",
"path": "bin/latte.py",
"copies": "1",
"size": "5972",
"license": "mit",
"hash": 3187197564952764400,
"line_mean": 34.9759036145,
"line_max": 383,
"alpha_frac": 0.6790020094,
"autogenerated": false,
"ratio": 2.9785536159600996,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4157555625360099,
"avg_score": null,
"num_lines": null
} |
''' a package of data mapping classes '''
__author__ = 'rcj1492'
__created__ = '2016.01'
__license__ = 'MIT'
from jsonmodel.exceptions import ModelValidationError
class mapModel(object):
    ''' a helper class of recursive methods to map the json model

    parsing the input populates two parallel, index-aligned lists:
    :ivar keyName: list of dot-path strings for every field discovered
    :ivar keyCriteria: list of criteria dictionaries, one per entry in keyName
    '''

    _dummy_int = 1
    _dummy_float = 1.1

    # the two lists below are index-aligned: a value whose class matches
    # _datatype_classes[i] is reported as datatype _datatype_names[i].
    # int and float both map to the json datatype 'number'.
    _datatype_names = [
        'string',
        'number',
        'number',
        'boolean',
        'map',
        'list',
        'null'
    ]
    _datatype_classes = [
        ''.__class__,
        _dummy_int.__class__,
        _dummy_float.__class__,
        True.__class__,
        {}.__class__,
        [].__class__,
        None.__class__
    ]

    def __init__(self, input):
        ''' recursively maps a dictionary or list model into keyName/keyCriteria

        :param input: dictionary or list with the json model to map
        :raises ModelValidationError: if input is not a dictionary or a list
        '''
        self.keyName = []
        self.keyCriteria = []
        if isinstance(input, dict):
            # the root of a dictionary model is recorded under the '.' path
            self.keyName = [ '.' ]
            self.keyCriteria = [ { 'required_field': True, 'value_datatype': 'map', 'extra_fields': False } ]
            self.dict(input, '')
        elif isinstance(input, list):
            self.list(input, '')
        else:
            raise ModelValidationError('Input for mapModel must be a dictionary or list.')

    def dict(self, input_dict, path_to_root):
        ''' recursively maps the fields of a dictionary component

        :param input_dict: dictionary with fields to map
        :param path_to_root: string with dot-path to the dictionary in the model
        :raises ModelValidationError: if a key is not a string or a value is not a json-valid datatype
        '''
        for key, value in input_dict.items():
            if not isinstance(key, str):
                key_path = path_to_root + '.' + str(key)
                raise ModelValidationError('Key name for field %s must be a string datatype.' % key_path)
            key_path = path_to_root + '.' + key
            self.keyName.append(key_path)
            try:
                class_index = self._datatype_classes.index(value.__class__)
            except ValueError:
                # list.index raises ValueError when the class is not json-valid;
                # a bare except here would also swallow unrelated failures
                raise ModelValidationError('Value for field %s must be a json-valid datatype.' % key_path)
            criteria_dict = {
                'required_field': False,
                'value_datatype': self._datatype_names[class_index]
            }
            # add integer data criteria to integer fields
            if class_index == 1:
                criteria_dict['integer_data'] = True
            # enable required field if field has a non-empty value
            if input_dict[key]:
                criteria_dict['required_field'] = True
            # add extra fields to dictionary fields (an empty dict allows extra fields)
            if isinstance(value, dict):
                criteria_dict['extra_fields'] = True
                if value:
                    criteria_dict['extra_fields'] = False
            # add declared value to string, number, boolean and list fields
            if criteria_dict['value_datatype'] in ('boolean', 'string', 'number', 'list'):
                criteria_dict['declared_value'] = value
            self.keyCriteria.append(criteria_dict)
            # recurse into container values
            if isinstance(value, dict):
                self.dict(input_dict=input_dict[key], path_to_root=key_path)
            elif isinstance(value, list):
                self.list(input_list=input_dict[key], path_to_root=key_path)

    def list(self, input_list, path_to_root):
        ''' recursively maps the first item of a list component

        :param input_list: list whose first item is used as the item model
        :param path_to_root: string with dot-path to the list in the model
        :raises ModelValidationError: if the first item is not a json-valid datatype
        '''
        if input_list:
            key_path = path_to_root + '[0]'
            self.keyName.append(key_path)
            try:
                class_index = self._datatype_classes.index(input_list[0].__class__)
            except ValueError:
                # list.index raises ValueError when the class is not json-valid
                raise ModelValidationError('Value for field %s must be a json-valid datatype.' % key_path)
            criteria_dict = {
                'required_field': False,
                'value_datatype': self._datatype_names[class_index]
            }
            # add integer data criteria to integer fields
            if class_index == 1:
                criteria_dict['integer_data'] = True
            # add extra fields to dictionary fields (an empty dict allows extra fields)
            if isinstance(input_list[0], dict):
                criteria_dict['extra_fields'] = True
                if input_list[0]:
                    criteria_dict['extra_fields'] = False
            # add declared value to string, number and boolean fields
            if isinstance(input_list[0], bool) or isinstance(input_list[0], str) or isinstance(input_list[0], int) or isinstance(input_list[0], float):
                criteria_dict['declared_value'] = input_list[0]
            self.keyCriteria.append(criteria_dict)
            # recurse into container items
            if isinstance(input_list[0], dict):
                self.dict(input_dict=input_list[0], path_to_root=key_path)
            elif isinstance(input_list[0], list):
                self.list(input_list=input_list[0], path_to_root=key_path)
| {
"repo_name": "collectiveacuity/jsonModel",
"path": "jsonmodel/mapping.py",
"copies": "1",
"size": "4428",
"license": "mit",
"hash": -4817052465847150000,
"line_mean": 38.5357142857,
"line_max": 151,
"alpha_frac": 0.5433604336,
"autogenerated": false,
"ratio": 4.138317757009346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027933156389684985,
"num_lines": 112
} |
''' a package of extensions to a jsonModel class object '''
__author__ = 'rcj1492'
__created__ = '2018.03'
__license__ = 'MIT'
def tabulate(json_model):
    ''' a function to add the tabulate method to a jsonModel object

    :param json_model: jsonModel object
    :return: jsonModel object (with a bound .tabulate method attached)
    '''
    import types
    from jsonmodel._extensions import tabulate as _tabulate
    try:
        from tabulate import tabulate
    except ImportError:
        # the optional third-party tabulate package is required for rendering;
        # a bare except here would also swallow SystemExit/KeyboardInterrupt
        import sys
        print('jsonmodel.extensions.tabulate requires the tabulate module. try: pip install tabulate')
        sys.exit(1)
    # bind the _tabulate function to the model instance as an instance method
    # NOTE(review): the owner argument to __get__ is conventionally
    # type(json_model); passing types.MethodType works but is unusual
    setattr(json_model, 'tabulate', _tabulate.__get__(json_model, types.MethodType))
    return json_model
if __name__ == '__main__':
    # demonstration: load a sample model, attach the tabulate extension and
    # write the rendered documentation table to a markdown file
    from jsonmodel import __module__
    from jsonmodel.loader import jsonLoader
    from jsonmodel.validators import jsonModel
    model_rules = jsonLoader(__module__, '../samples/sample-model.json')
    model_rules['components']['.']['extra_fields'] = True
    model_rules['components']['.datetime']['field_description'] = 'https://collectiveacuity.com'
    rules_model = jsonModel(model_rules)
    rules_model = tabulate(rules_model)
    documentation = rules_model.tabulate(syntax='javascript')
    # the with statement closes the file automatically; the explicit
    # f.close() in the original was redundant and has been removed
    with open('../docs/test.md', 'wt') as f:
        f.write(documentation)
| {
"repo_name": "collectiveacuity/jsonModel",
"path": "jsonmodel/extensions.py",
"copies": "1",
"size": "1346",
"license": "mit",
"hash": 5716098381959122000,
"line_mean": 30.3023255814,
"line_max": 102,
"alpha_frac": 0.647102526,
"autogenerated": false,
"ratio": 3.958823529411765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5105926055411765,
"avg_score": null,
"num_lines": null
} |
''' a package of functions for parsing STDOUT and STDERR '''
__author__ = 'rcj1492'
__created__ = '2018.02'
__license__ = 'MIT'
def convert_table(shell_output, delimiter='\t|\s{2,}', output='dict'):
    ''' a method to convert a STDOUT shell table into a python data structure

    column boundaries are derived from the character positions of the header
    fields in the first line; each subsequent line is sliced at those positions

    :param shell_output: string from STDOUT with headers in the first line
    :param delimiter: string with regex pattern delimiting headers
    :param output: string with type of structure to output (dict, list or csv)
    :return: list of dictionaries or list of lists or string with csv format
    :raises ValueError: if output is not one of dict, list or csv
    '''

    # retrieve header columns
    import re
    gap_pattern = re.compile(delimiter)
    output_lines = shell_output.splitlines()
    column_headers = gap_pattern.split(output_lines[0])
    # discard an empty field produced by a delimiter at the edge of the header
    # line; list.index raises ValueError when '' is absent, so test membership
    # first (the original assumed a JS-style -1 return and crashed otherwise)
    if '' in column_headers:
        column_headers.remove('')

    # generate indices tuples
    # each tuple holds the (start, end) character positions of a column; the
    # last column runs to the end of the line, so its end must be None — an
    # end of -1 would silently drop the final character of every value
    indices = []
    for i in range(len(column_headers)):
        if i + 1 < len(column_headers):
            indices.append((
                output_lines[0].find(column_headers[i]),
                output_lines[0].find(column_headers[i + 1])
            ))
        else:
            indices.append((
                output_lines[0].find(column_headers[i]),
                None
            ))

    # add headers to output
    python_list = []
    csv_string = ''
    if output == 'dict':
        pass
    elif output == 'list':
        python_list.append(column_headers)
    elif output == 'csv':
        for i in range(len(column_headers)):
            if i:
                csv_string += ','
            csv_string += column_headers[i]
    else:
        raise ValueError('output argument must be one of dict, list or csv values.')

    # add rows to output
    for i in range(1, len(output_lines)):
        if output == 'dict':
            row_details = {}
            for j in range(len(column_headers)):
                row_details[column_headers[j]] = output_lines[i][indices[j][0]:indices[j][1]].rstrip()
            python_list.append(row_details)
        elif output == 'list':
            row_list = []
            for j in range(len(column_headers)):
                # rstrip the sliced value before appending (the original
                # called .rstrip() on the None returned by list.append)
                row_list.append(output_lines[i][indices[j][0]:indices[j][1]].rstrip())
            python_list.append(row_list)
        elif output == 'csv':
            csv_string += '\n'
            for j in range(len(column_headers)):
                if j:
                    csv_string += ','
                csv_string += output_lines[i][indices[j][0]:indices[j][1]].rstrip()

    # return output
    if csv_string:
        return csv_string
    return python_list
"repo_name": "collectiveacuity/labPack",
"path": "labpack/parsing/shell.py",
"copies": "1",
"size": "2633",
"license": "mit",
"hash": 6357947268402882000,
"line_mean": 32.7692307692,
"line_max": 102,
"alpha_frac": 0.5620964679,
"autogenerated": false,
"ratio": 3.941616766467066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9956686803503054,
"avg_score": 0.009405286172802375,
"num_lines": 78
} |
''' a package of helper functions for extensions.py '''
__author__ = 'rcj1492'
__created__ = '2018.03'
__license__ = 'MIT'
def _segment_path(dot_path):
import re
digit_pat = re.compile('\[(\d+)\]')
key_list = dot_path.split('.')
segment_list = []
for key in key_list:
if key:
item_list = digit_pat.split(key)
for item in item_list:
if item:
segment_list.append(item)
return segment_list
def _add_links(text_string):
import re
url_parts = re.compile('(([A-Za-z]{3,9}:(?://)?)(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9.\-]+(:[0-9]+)?|(?:www.|[\-;:&=\+\$,\w]+@)[A-Za-z0-9.\-]+)((?:/[\+~%/.\w\-_]*)?\??(?:[\-\+,=&;%@.\w_]*)#?(?:[\w]*))?')
url_pattern = re.compile('((([A-Za-z]{3,9}:(?://)?)(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9.\-]+(:[0-9]+)?|(?:www.|[\-;:&=\+\$,\w]+@)[A-Za-z0-9.\-]+)((?:/[\+~%/.\w\-_]*)?\??(?:[\-\+,=&;%@.\w_]*)#?(?:[\w]*))?)')
def _replace_url(x):
url_string = x.group(0)
if not url_parts.findall(url_string)[0][1]:
return url_string
url_text = '<a href="%s">%s</a>' % (url_string, url_string)
return url_text
return url_pattern.sub(_replace_url, text_string)
def tabulate(self, format='html', syntax=''):
    ''' a function to create an html documentation table from the class model keyMap

    :param format: string with format for table output
    :param syntax: [optional] string with linguistic syntax ('javascript' maps
        python datatype names to their javascript equivalents)
    :return: string with html table

    NOTE(review): the format argument is accepted but never referenced in the
    body -- the table is always rendered with tablefmt='html'; confirm intent.
    ('format' also shadows the builtin of the same name.)
    '''
    from tabulate import tabulate as _tabulate
    # define headers
    headers = ['Field', 'Datatype', 'Required', 'Default', 'Examples', 'Conditionals', 'Description']
    rows = []
    # flags tracking whether the optional columns received any data;
    # unused columns are stripped from the table after row construction
    default_values = False
    additional_conditions = False
    field_description = False
    # construct rows (skip the root '.' entry, whose path has no segments)
    for key, value in self.keyMap.items():
        key_segments = _segment_path(key)
        if key_segments:
            row = []
            # add field column (indent nested fields by their path depth)
            field_name = ''
            if len(key_segments) > 1:
                for i in range(1,len(key_segments)):
                    field_name += ' '
            if key_segments[-1] == '0':
                field_name += '<i>item</i>'
            else:
                field_name += key_segments[-1]
            row.append(field_name)
            # add datatype column (translated for javascript syntax if requested)
            value_datatype = value['value_datatype']
            if 'integer_data' in value.keys():
                if value['integer_data'] and syntax != 'javascript':
                    value_datatype = 'integer'
            elif value['value_datatype'] == 'map':
                if syntax == 'javascript':
                    value_datatype = 'object'
            elif value['value_datatype'] == 'list':
                if syntax == 'javascript':
                    value_datatype = 'array'
                # retrieve datatype of item in list
                item_key = key + '[0]'
                item_datatype = self.keyMap[item_key]['value_datatype']
                if syntax == 'javascript':
                    if item_datatype == 'list':
                        item_datatype = 'array'
                    elif item_datatype == 'map':
                        item_datatype = 'object'
                elif 'integer_data' in self.keyMap[item_key].keys():
                    if self.keyMap[item_key]['integer_data']:
                        item_datatype = 'integer'
                value_datatype += ' of %ss' % item_datatype
            row.append(value_datatype)
            # add required column
            if value['required_field']:
                row.append('yes')
            else:
                row.append('')
            # add default column (strings quoted, booleans lowercased)
            if 'default_value' in value.keys():
                default_values = True
                if isinstance(value['default_value'], str):
                    row.append('"%s"' % value['default_value'])
                elif isinstance(value['default_value'], bool):
                    row.append(str(value['default_value']).lower())
                else:
                    row.append(str(value['default_value']))
            else:
                row.append('')
            # define recursive example constructor
            def determine_example(k, v):
                # NOTE(review): parameter k is unused within this helper
                example_value = ''
                if 'example_values' in v.keys():
                    for i in v['example_values']:
                        if example_value:
                            example_value += ', '
                        if isinstance(i, str):
                            example_value += '"%s"' % i
                        else:
                            # NOTE(review): this appends the enclosing loop
                            # variable 'value' (a criteria dict) rather than
                            # the example item i -- looks like a bug; confirm
                            example_value += value
                elif 'declared_value' in v.keys():
                    if isinstance(v['declared_value'], str):
                        example_value = '"%s"' % v['declared_value']
                    elif isinstance(v['declared_value'], bool):
                        example_value = str(v['declared_value']).lower()
                    else:
                        example_value = v['declared_value']
                else:
                    if v['value_datatype'] == 'map':
                        example_value = '{...}'
                    elif v['value_datatype'] == 'list':
                        example_value = '[...]'
                    elif v['value_datatype'] == 'null':
                        example_value = 'null'
                return example_value
            # add examples column
            row.append(determine_example(key, value))
            # add additional conditions and description columns from the
            # remaining (non-core) criteria keys
            conditions = ''
            description = ''
            for k, v in value.items():
                extra_integer = False
                if k == 'integer_data' and syntax == 'javascript':
                    extra_integer = True
                if k not in ('example_values', 'value_datatype', 'required_field', 'declared_value', 'default_value', 'field_position', 'field_metadata') or extra_integer:
                    add_extra = False
                    if k == 'extra_fields':
                        if v:
                            add_extra = True
                    if k in ('field_description', 'field_title'):
                        field_description = True
                        # field_description takes precedence over field_title
                        if k == 'field_description':
                            description = v
                        elif not description:
                            description = v
                    elif k != 'extra_fields' or add_extra:
                        additional_conditions = True
                        if conditions:
                            conditions += '<br>'
                        condition_value = v
                        if isinstance(v, str):
                            condition_value = '"%s"' % v
                        elif isinstance(v, bool):
                            condition_value = str(v).lower()
                        conditions += '%s: %s' % (k, condition_value)
            row.append(conditions)
            row.append(description)
            # add row to rows
            rows.append(row)
    # add rows for top field
    top_dict = self.keyMap['.']
    if top_dict['extra_fields']:
        rows.append(['<i>**extra fields allowed</i>', '', '', '', '', '', ''])
    if 'max_bytes' in top_dict.keys():
        rows.append(['<i>**max bytes: %s</i>' % top_dict['max_bytes'], '', '', '', '', '', ''])
    # eliminate unused columns (pop from the end first so indices stay valid)
    if not field_description:
        headers.pop()
    if not additional_conditions:
        headers.pop()
    if not default_values:
        headers.pop(3)
    for row in rows:
        if not field_description:
            row.pop()
        if not additional_conditions:
            row.pop()
        if not default_values:
            row.pop(3)
    # construct table html
    table_html = _tabulate(rows, headers, tablefmt='html')
    # add links to urls in text
    # markdown_url = re.compile('\[(.*?)\]\((.*)\)')
    table_html = _add_links(table_html)
    return table_html
"repo_name": "collectiveacuity/jsonModel",
"path": "jsonmodel/_extensions.py",
"copies": "1",
"size": "8008",
"license": "mit",
"hash": -8931232929679571000,
"line_mean": 37.3205741627,
"line_max": 206,
"alpha_frac": 0.4604145854,
"autogenerated": false,
"ratio": 4.214736842105263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003079472058727296,
"num_lines": 209
} |
''' a package of methods for compiling information about ISO 3166 2 US state codes '''
__author__ = 'rcj1492'
__created__ = '2017.10'
__license__ = 'MIT'
# TODO scrape an online table to get latest csv values
def update_csv(csv_url=''):
    ''' a placeholder to refresh the csv dataset from an online source (not implemented) '''
    pass
def compile_list(csv_file='datasets/iso_3166_2_US.csv'):
    ''' compiles the iso 3166-2 US csv dataset into a list of row lists

    :param csv_file: string with path to the csv file relative to the module root
    :return: list of lists with the column values of each csv row
    '''
    from os import path
    import csv
    from labpack import __module__
    from importlib.util import find_spec

    # resolve the csv file path relative to the installed module location
    module_path = find_spec(__module__).submodule_search_locations[0]
    csv_path = path.join(module_path, csv_file)
    if not path.isfile(csv_path):
        raise Exception('%s is not a valid file path.' % csv_path)

    # read every row into its own list of column values
    with open(csv_path, 'rt', errors='ignore') as f:
        return [list(row) for row in csv.reader(f)]
def compile_map(key_column='USPS', csv_list=None):
    ''' compiles a map of csv rows keyed by the value in a header column

    :param key_column: string with the header name to use as the map key
    :param csv_list: [optional] list of lists from a csv table (lazy loads compile_list)
    :return: dictionary mapping each non-empty key value to its full row
    :raises ValueError: if csv_list is malformed or key_column is not a header
    '''
    # lazy load the csv table if no list was provided
    if not csv_list:
        csv_list = compile_list()

    # validate inputs
    _list_error = 'csv_list argument must be a list generated from a csv table.'
    if not csv_list or not isinstance(csv_list, list):
        raise ValueError(_list_error)
    elif not isinstance(csv_list[0], list):
        raise ValueError(_list_error)
    elif not key_column in csv_list[0]:
        raise ValueError('key_column value "%s" is not a key in the csv table headers.' % key_column)

    # map each data row (skipping the header row) by its key column value;
    # rows with an empty key value are omitted from the map
    key_index = csv_list[0].index(key_column)
    table_map = {}
    for row in csv_list[1:]:
        if row[key_index]:
            if not isinstance(row, list):
                raise ValueError(_list_error)
            table_map[row[key_index]] = row
    return table_map
# smoke test: compile the default map and print its available key values
if __name__ == '__main__':
    code_map = compile_map()
    print(code_map.keys())
"repo_name": "collectiveacuity/labPack",
"path": "labpack/datasets/iso_3166_2_US.py",
"copies": "1",
"size": "2157",
"license": "mit",
"hash": -2093339243405998000,
"line_mean": 28.9722222222,
"line_max": 101,
"alpha_frac": 0.6142790913,
"autogenerated": false,
"ratio": 3.7447916666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9728886404753343,
"avg_score": 0.026036870642664536,
"num_lines": 72
} |
''' a package of methods for compiling information about ISO 3166 country codes '''
__author__ = 'rcj1492'
__created__ = '2017.10'
__license__ = 'MIT'
# TODO scrape an online table to get latest csv values
def update_csv(csv_url=''):
    ''' a placeholder to refresh the csv dataset from an online source (not implemented) '''
    pass
def compile_list(csv_file='datasets/iso_3166.csv'):
    ''' compiles the iso 3166 country code csv dataset into a list of row lists

    :param csv_file: string with path to the csv file relative to the module root
    :return: list of lists with the column values of each csv row
    '''
    from os import path
    import csv
    from labpack import __module__
    from importlib.util import find_spec

    # resolve the csv file path relative to the installed module location
    module_path = find_spec(__module__).submodule_search_locations[0]
    csv_path = path.join(module_path, csv_file)
    if not path.isfile(csv_path):
        raise Exception('%s is not a valid file path.' % csv_path)

    # read every row into its own list of column values
    with open(csv_path, 'rt', errors='ignore') as f:
        return [list(row) for row in csv.reader(f)]
def compile_map(key_column='Alpha-3 code', csv_list=None):
    ''' compiles a map of csv rows keyed by the value in a header column

    :param key_column: string with the header name to use as the map key
    :param csv_list: [optional] list of lists from a csv table (lazy loads compile_list)
    :return: dictionary mapping each non-empty key value to its full row
    :raises ValueError: if csv_list is malformed or key_column is not a header
    '''
    # lazy load the csv table if no list was provided
    if not csv_list:
        csv_list = compile_list()

    # validate inputs
    _list_error = 'csv_list argument must be a list generated from a csv table.'
    if not csv_list or not isinstance(csv_list, list):
        raise ValueError(_list_error)
    elif not isinstance(csv_list[0], list):
        raise ValueError(_list_error)
    elif not key_column in csv_list[0]:
        raise ValueError('key_column value "%s" is not a key in the csv table headers.' % key_column)

    # map each data row (skipping the header row) by its key column value;
    # rows with an empty key value are omitted from the map
    key_index = csv_list[0].index(key_column)
    table_map = {}
    for row in csv_list[1:]:
        if row[key_index]:
            if not isinstance(row, list):
                raise ValueError(_list_error)
            table_map[row[key_index]] = row
    return table_map
# smoke test: compile the default map and print its available key values
if __name__ == '__main__':
    code_map = compile_map()
    print(code_map.keys())
"repo_name": "collectiveacuity/labPack",
"path": "labpack/datasets/iso_3166.py",
"copies": "1",
"size": "2157",
"license": "mit",
"hash": 6629656093320428000,
"line_mean": 28.9722222222,
"line_max": 101,
"alpha_frac": 0.6152063051,
"autogenerated": false,
"ratio": 3.770979020979021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48861853260790206,
"avg_score": null,
"num_lines": null
} |
''' a package of methods for managing ssl authentication '''
__author__ = 'rcj1492'
__created__ = '2018.02'
__license__ = 'MIT'
def generate_keystore(key_alias, key_folder='./', root_cert='', truststore='', password='', organization='', organization_unit='', locality='', country='', key_size=2048, verbose=True, overwrite=False):
    ''' a function to generate a keystore and cert files for self-signed ssl authentication

    shells out to the openssl and keytool command line utilities; both must
    be installed. when no root_cert is given, a new self-signed root cert and
    key are created; otherwise the existing root cert signs the new key.

    :param key_alias: string with alias (and file basename) for the server key
    :param key_folder: string with path to folder in which to save key files
    :param root_cert: [optional] string with path to existing root .crt/.pem file
    :param truststore: [optional] string with path to existing truststore .jks file
    :param password: string with password to protect keystores and keys
    :param organization: string with organization name for the cert DNAME
    :param organization_unit: string with organization unit for the cert DNAME
    :param locality: string with locality for the cert DNAME
    :param country: string with country code for the cert DNAME
    :param key_size: integer with rsa key size (2048 or 4096)
    :param verbose: boolean to enable progress messages to stdout
    :param overwrite: boolean to overwrite any existing key files
    :return: True

    NOTE(review): sys_command strings are built by interpolation and run with
    shell=True -- key_alias, password and paths are not shell-escaped; do not
    pass untrusted input to this function.
    '''
    title = 'generate_keystore'
    # import dependencies
    import os
    from subprocess import call, STDOUT, Popen, PIPE
    DEVNULL = open(os.devnull, 'w')
    # define system call
    def _call(sys_command, ignore='', prompt_input='', title=''):
        # run a shell command, feed it prompt_input on stdin if given, and
        # raise on any error output that does not contain the ignore marker
        if title and verbose:
            print('%s ... ' % title, end='', flush=True)
        p = Popen(sys_command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        if prompt_input:
            stdout, stderr = p.communicate(str('%s\n' % prompt_input).encode('utf-8'))
        if p.returncode != 0:
            cmd_err = p.stdout.read().decode()
            if not cmd_err:
                pass
            elif ignore:
                if not cmd_err.find(ignore) > -1:
                    raise Exception(cmd_err)
            else:
                if title and verbose:
                    print('ERROR.')
                raise Exception(cmd_err)
        if title and verbose:
            print('done.')
    # verify libraries
    try:
        call('openssl version', shell=True, stdout=DEVNULL)
    except:
        raise Exception('%s requires openssl library. try: sudo apt-get install openssl' % title)
    try:
        _call('keytool -help', ignore='Key and Certificate Management Tool')
    except:
        raise Exception('%s requires keytool library. try: sudo apt-get install openjdk-8-jre' % title)
    # validate input
    if not isinstance(key_alias, str):
        raise ValueError('%s(key_alias="...") must be a string datatype.' % title)
    if not key_size in (2048, 4096):
        raise ValueError('%s(key_size=%s must be either 2048 or 4096' % (title, key_size))
    import re
    input_fields = {
        'organization': organization,
        'organization_unit': organization_unit,
        'locality': locality,
        'country': country
    }
    for key, value in input_fields.items():
        if re.findall('[^\w_\-\s]', value):
            raise ValueError('%s(%s="%s") must contain only alphanumeric characters, _, - and spaces.' % (title, key, value))
    # construct key folder
    from os import path, makedirs, remove
    if not path.exists(key_folder):
        makedirs(key_folder)
    elif not path.isdir(key_folder):
        raise ValueError('%s(key_folder="%s") must be a directory.' % (title, key_folder))
    # validate cert path
    cert_path = path.join(key_folder, '%s.crt' % key_alias)
    if path.exists(cert_path) and not overwrite:
        raise ValueError('%s.crt already exists in %s. to overwrite, set overwrite=true' % (key_alias, key_folder))
    # validate root path (an absolute path or a path relative to key_folder)
    from copy import deepcopy
    root_cert_copy = deepcopy(root_cert)
    if root_cert_copy:
        if not path.exists(root_cert_copy):
            root_cert_copy = path.join(key_folder, root_cert_copy)
            if not path.exists(root_cert_copy):
                raise ValueError('%s(root_cert="%s") is not a valid path.' % (title, root_cert_copy))
        root_node, root_ext = path.splitext(root_cert_copy)
        root_key = root_node + '.key'
        if not root_ext in ('.crt', '.pem'):
            raise ValueError('%s(root_cert="%s") must be a .crt or .pem file type.' % (title, root_cert_copy))
        elif not path.exists(root_key):
            key_path, key_name = path.split(root_key)
            raise ValueError('%s(root_cert="%s") requires a matching private key %s' % (title, root_cert_copy, key_name))
        root_path = root_cert_copy
        key_path = root_path.replace('.crt', '.key')
    else:
        key_path = path.join(key_folder, 'root.key')
        root_path = path.join(key_folder, 'root.crt')
        if path.exists(root_path) and not overwrite:
            raise ValueError('root.crt already exists in %s. to overwrite, set overwrite=true' % key_folder)
    # validate truststore path
    truststore_copy = deepcopy(truststore)
    if truststore_copy:
        if not path.exists(truststore_copy):
            truststore_copy = path.join(key_folder, truststore_copy)
            if not path.exists(truststore_copy):
                raise ValueError('%s(truststore="%s") is not a valid path.' % (title, truststore_copy))
        trust_node, trust_ext = path.splitext(truststore_copy)
        # fixed: ('.jks') is a plain string, not a tuple, so the original
        # check was a substring test; a one-element tuple restores membership
        if not trust_ext in ('.jks',):
            raise ValueError('%s(truststore="%s") must be a .jks file type.' % (title, truststore_copy))
        truststore_path = truststore_copy
        trust_root, trust_node = path.split(truststore_path)
        trust_alias, trust_ext = path.splitext(trust_node)
    else:
        truststore_path = path.join(key_folder, 'truststore.jks')
        trust_alias = 'truststore'
        if path.exists(truststore_path) and not overwrite:
            raise ValueError('%s.jks already exists in %s. to overwrite, set overwrite=true' % (trust_alias, key_folder))
    # format DNAME fields (keytool requires non-empty values)
    if not organization:
        organization = 'None'
    if not organization_unit:
        organization_unit = 'None'
    if not locality:
        locality = 'None'
    if not country:
        country = 'None'
    # create root certificate
    if not root_cert_copy:
        subject_args = '/CN=root/OU=%s/O=%s/L=%s/C=%s' % (
            organization_unit,
            organization,
            locality,
            country
        )
        root_subject = '-subj %s' % subject_args.replace(' ', '\ ')
        password_text = '-passout pass:%s' % password
        sys_command = 'openssl req -newkey rsa:%s -x509 -nodes -keyout %s -out %s -days 36500 %s %s' % (key_size, key_path, root_path, root_subject, password_text)
        _call(sys_command, ignore='writing new private key', title='Generating root certificate')
    # generate server cert
    server_dname = '-dname "CN=%s, OU=%s, O=%s, L=%s, C=%s"' % (
        key_alias, organization_unit, organization, locality, country
    )
    keystore_path = path.join(key_folder, '%s.jks' % key_alias)
    if path.exists(keystore_path) and not overwrite:
        raise ValueError('%s.jks already exists in %s. to overwrite, set overwrite=true' % (key_alias, key_folder))
    elif path.exists(keystore_path):
        remove(keystore_path)
    sys_command = 'keytool -genkey -keyalg RSA -alias %s -validity 36500 -keystore %s -storepass %s -keypass %s -keysize %s %s' % (key_alias, keystore_path, password, password, key_size, server_dname)
    _call(sys_command, ignore='JKS keystore uses a proprietary format', title='Generating keystore for %s' % key_alias)
    # convert cert to pkcs12
    sys_command = 'keytool -importkeystore -srckeystore %s -destkeystore %s -deststoretype pkcs12 -storepass %s -keypass %s' % (keystore_path, keystore_path, password, password)
    _call(sys_command, prompt_input=password, title='Converting keystore to pkcs12 standard')
    remove('%s.old' % keystore_path)
    # generate certificate request
    request_path = path.join(key_folder, '%s.csr' % key_alias)
    sys_command = 'keytool -certreq -alias %s -file %s -keystore %s -storepass %s -keypass %s %s' % (
        key_alias, request_path, keystore_path, password, password, server_dname
    )
    _call(sys_command, title='Generating certificate signing request for %s'% key_alias)
    # sign server cert with root cert
    cert_path = path.join(key_folder, '%s.crt' % key_alias)
    sys_command = 'openssl x509 -req -CA %s -CAkey %s -in %s -out %s -days 36500 -CAcreateserial -passin pass:%s' % (root_path, key_path, request_path, cert_path, password)
    _call(sys_command, ignore='Getting CA Private Key', title='Signing certificate for %s with root certificate' % key_alias)
    # add root cert to server keystore
    root_root, root_node = path.split(root_path)
    root_alias, root_ext = path.splitext(root_node)
    sys_command = 'keytool -importcert -keystore %s -alias %s -file %s -noprompt -keypass %s -storepass %s' % (keystore_path, root_alias, root_path, password, password)
    _call(sys_command, ignore='Certificate was added to keystore', title='Adding root certificate to keystore for %s' % key_alias)
    # add server cert to server keystore
    sys_command = 'keytool -importcert -keystore %s -alias %s -file %s -noprompt -keypass %s -storepass %s' % (keystore_path, key_alias, cert_path, password, password)
    _call(sys_command, ignore='Certificate reply was installed in keystore', title='Adding certificate for %s to keystore for %s' % (key_alias, key_alias))
    # add root certificate to truststore
    if not truststore_copy:
        if path.exists(truststore_path):
            remove(truststore_path)
        sys_command = 'keytool -importcert -keystore %s -alias %s -file %s -noprompt -keypass %s -storepass %s' % (truststore_path, root_alias, root_path, password, password)
        _call(sys_command, ignore='Certificate was added to keystore', title='Generating truststore for root certificate')
    # add server cert to truststore
    sys_command = 'keytool -importcert -keystore %s -alias %s -file %s -noprompt -keypass %s -storepass %s' % (truststore_path, key_alias, cert_path, password, password)
    _call(sys_command, ignore='Certificate was added to keystore', title='Adding certificate for %s to truststore' % key_alias)
    # remove .srl files left behind by -CAcreateserial
    from os import listdir
    for file_name in listdir('./'):
        # NOTE(review): compares the whole file name to '.srl' -- serial files
        # are normally named <root>.srl, so this may be intended as an
        # extension check; confirm
        if file_name == '.srl':
            remove(file_name)
    for file_name in listdir(key_folder):
        file_alias, file_ext = path.splitext(file_name)
        if file_ext == '.srl':
            remove(path.join(key_folder, file_name))
    return True
# manual integration test: generate a fresh root cert + keystore, then reuse
# the generated root cert and truststore to sign a second key
if __name__ == '__main__':
    generate_keystore(
        key_alias='123.456.789.0',
        key_folder='../../data/keys',
        root_cert='',
        truststore='',
        password='cassandra',
        organization='Collective Acuity',
        organization_unit='Cassandra Cluster',
        locality='',
        country='US'
    )
    generate_keystore(
        key_alias='123.456.789.1',
        key_folder='../../data/keys',
        root_cert='root.crt',
        truststore='truststore.jks',
        password='cassandra',
        organization='Collective Acuity',
        organization_unit='Cassandra Cluster',
        locality='',
        country='US'
    )
"repo_name": "collectiveacuity/labPack",
"path": "labpack/authentication/ssl.py",
"copies": "1",
"size": "10536",
"license": "mit",
"hash": -1461070929057804800,
"line_mean": 45.6238938053,
"line_max": 202,
"alpha_frac": 0.6303151101,
"autogenerated": false,
"ratio": 3.5995900239152716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47299051340152715,
"avg_score": null,
"num_lines": null
} |
''' a package of methods to compile search filters '''
__author__ = 'rcj1492'
__created__ = '2017.07'
__license__ = 'MIT'
def positional_filter(positional_filters, title=''):
    '''
        a method to construct a conditional filter function to test positional arguments

    :param positional_filters: dictionary or list of dictionaries with query criteria
    :param title: string with name of function to use instead (in error messages)
    :return: callable filter_function(*args) which returns True/False

    NOTE:   query criteria architecture

            each item in the positional filters argument must be a dictionary
            which is composed of integer-value key names that represent the
            index value of the positional segment to test and key values
            with the dictionary of conditional operators used to test the
            string value in the indexed field of the record.

            eg. positional_filters = [ { 0: { 'must_contain': [ '^lab' ] } } ]

            this example filter looks at the first segment of each key string
            in the collection for a string value which starts with the
            characters 'lab'. as a result, it will match both the following:
                lab/unittests/1473719695.2165067.json
                'laboratory20160912.json'

    NOTE:   the filter function uses a query filters list structure to represent
            the disjunctive normal form of a logical expression. a record is
            added to the results list if any query criteria dictionary in the
            list evaluates to true. within each query criteria dictionary, all
            declared conditional operators must evaluate to true.

            in this way, positional_filters represents a boolean OR operator and
            each criteria dictionary inside the list represents a boolean AND
            operator between all keys in the dictionary.

            each query criteria uses the architecture of query declaration in
            the jsonModel.query method

    NOTE:   the function will lazy load a dictionary input into a one-item list

    positional_filters:
    [ { 0: { conditional operators }, 1: { conditional_operators }, ... } ]

    conditional operators:
        "byte_data": false,
        "discrete_values": [ "" ],
        "excluded_values": [ "" ],
        'equal_to': '',
        "greater_than": "",
        "less_than": "",
        "max_length": 0,
        "max_value": "",
        "min_length": 0,
        "min_value": "",
        "must_contain": [ "" ],
        "must_not_contain": [ "" ],
        "contains_either": [ "" ]
    '''
    # define help text
    if not title:
        title = 'positional_filter'
    filter_arg = '%s(positional_filters=[...])' % title
    # construct path_filter model (schema of the allowed conditional operators)
    filter_schema = {
        'schema': {
            'byte_data': False,
            'discrete_values': [ '' ],
            'excluded_values': [ '' ],
            'equal_to': '',
            'greater_than': '',
            'less_than': '',
            'max_length': 0,
            'max_value': '',
            'min_length': 0,
            'min_value': '',
            'must_contain': [ '' ],
            'must_not_contain': [ '' ],
            'contains_either': [ '' ]
        },
        'components': {
            '.discrete_values': {
                'required_field': False
            },
            '.excluded_values': {
                'required_field': False
            },
            '.must_contain': {
                'required_field': False
            },
            '.must_not_contain': {
                'required_field': False
            },
            '.contains_either': {
                'required_field': False
            }
        }
    }
    from jsonmodel.validators import jsonModel
    filter_model = jsonModel(filter_schema)
    # lazy load path dictionary into a one-item list
    if isinstance(positional_filters, dict):
        positional_filters = [ positional_filters ]
    # validate input (each criteria dict is checked against the filter model)
    if not isinstance(positional_filters, list):
        raise TypeError('%s must be a list.' % filter_arg)
    for i in range(len(positional_filters)):
        if not isinstance(positional_filters[i], dict):
            raise TypeError('%s item %s must be a dictionary.' % (filter_arg, i))
        for key, value in positional_filters[i].items():
            _key_name = '%s : {...}' % key
            if not isinstance(key, int):
                raise TypeError('%s key name must be an int.' % filter_arg.replace('...', _key_name))
            elif not isinstance(value, dict):
                raise TypeError('%s key value must be a dictionary' % filter_arg.replace('...', _key_name))
            filter_model.validate(value)
    # construct segment value model (used to evaluate each criteria at runtime)
    segment_schema = { 'schema': { 'segment_value': 'string' } }
    segment_model = jsonModel(segment_schema)
    # construct filter function (closure over positional_filters/segment_model)
    def filter_function(*args):
        max_index = len(args) - 1
        # OR over criteria dicts; AND over the keys within each dict
        # NOTE(review): loop variable 'filter' shadows the builtin
        for filter in positional_filters:
            criteria_match = True
            for key, value in filter.items():
                # a criteria index beyond the supplied args fails the dict
                if key > max_index:
                    criteria_match = False
                    break
                segment_criteria = { '.segment_value': value }
                segment_data = { 'segment_value': args[key] }
                if not segment_model.query(segment_criteria, segment_data):
                    criteria_match = False
                    break
            if criteria_match:
                return True
        return False
    return filter_function
"repo_name": "collectiveacuity/labPack",
"path": "labpack/compilers/filters.py",
"copies": "1",
"size": "5516",
"license": "mit",
"hash": 439457056498668740,
"line_mean": 36.277027027,
"line_max": 107,
"alpha_frac": 0.5598259608,
"autogenerated": false,
"ratio": 4.647009267059815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5706835227859816,
"avg_score": null,
"num_lines": null
} |
''' a package of methods to generate the differences between two data architectures '''
__author__ = 'rcj1492'
__created__ = '2015.08'
__license__ = 'MIT'
def compare_records(new_record, old_record):

    '''
        a method to generate the differences between two data architectures

    :param new_record: set, list or dictionary with new details of an item
    :param old_record: set, list or dictionary with old details of an item
    :return: list with dictionary of changes between old and new records

    [ {
        'path': [ 'dict2', 'dict', 'list2', 4, 'key' ],
        'action': 'UPDATE',
        'value': 'newValue'
    } ]
    '''

    from copy import deepcopy

    # refuse mismatched top-level datatypes up front
    if new_record.__class__ != old_record.__class__:
        raise TypeError('Datatype of new and old data must match.')

    # work on copies so the comparison never mutates caller data
    new_copy = deepcopy(new_record)
    old_copy = deepcopy(old_record)

    # select the recursive walker matching the record datatype
    if isinstance(new_copy, dict):
        walker = _compare_dict
    elif isinstance(new_copy, list):
        walker = _compare_list
    elif isinstance(new_copy, set):
        walker = _compare_set
    else:
        raise TypeError('Records must be either sets, lists or dictionaries.')

    return walker(new_copy, old_copy, [], [])
def _compare_dict(new_dict, old_dict, change_list=None, root=None):
'''
a method for recursively listing changes made to a dictionary
:param new_dict: dictionary with new key-value pairs
:param old_dict: dictionary with old key-value pairs
:param change_list: list of differences between old and new
:patam root: string with record of path to the root of the main object
:return: list of differences between old and new
'''
from copy import deepcopy
new_keys = set(new_dict.keys())
old_keys = set(old_dict.keys())
missing_keys = old_keys - new_keys
extra_keys = new_keys - old_keys
same_keys = new_keys.intersection(old_keys)
for key in missing_keys:
new_path = deepcopy(root)
new_path.append(key)
change_list.append({'action': 'DELETE', 'value': None, 'path': new_path})
for key in extra_keys:
for k, v in new_dict.items():
if key == k:
new_path = deepcopy(root)
new_path.append(key)
change_list.append({'action': 'ADD', 'value': v, 'path': new_path})
for key in same_keys:
new_path = deepcopy(root)
new_path.append(key)
if new_dict[key].__class__ != old_dict[key].__class__:
change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path})
elif isinstance(new_dict[key], dict):
_compare_dict(new_dict[key], old_dict[key], change_list, new_path)
elif isinstance(new_dict[key], list):
_compare_list(new_dict[key], old_dict[key], change_list, new_path)
elif isinstance(new_dict[key], set):
_compare_set(new_dict[key], old_dict[key], change_list, new_path)
elif new_dict[key] != old_dict[key]:
change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path})
return change_list
def _compare_list(new_list, old_list, change_list=None, root=None):
'''
a method for recursively listing changes made to a list
:param new_list: list with new value
:param old_list: list with old values
:param change_list: list of differences between old and new
:param root: string with record of path to the root of the main object
:return: list of differences between old and new
'''
from copy import deepcopy
if len(old_list) > len(new_list):
same_len = len(new_list)
for i in reversed(range(len(new_list), len(old_list))):
new_path = deepcopy(root)
new_path.append(i)
change_list.append({'action': 'REMOVE', 'value': None, 'path': new_path})
elif len(new_list) > len(old_list):
same_len = len(old_list)
append_list = []
path = deepcopy(root)
for i in range(len(old_list), len(new_list)):
append_list.append(new_list[i])
change_list.append({'action': 'APPEND', 'value': append_list, 'path': path})
else:
same_len = len(new_list)
for i in range(0, same_len):
new_path = deepcopy(root)
new_path.append(i)
if new_list[i].__class__ != old_list[i].__class__:
change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path})
elif isinstance(new_list[i], dict):
_compare_dict(new_list[i], old_list[i], change_list, new_path)
elif isinstance(new_list[i], list):
_compare_list(new_list[i], old_list[i], change_list, new_path)
elif isinstance(new_list[i], set):
_compare_set(new_list[i], old_list[i], change_list, new_path)
elif new_list[i] != old_list[i]:
change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path})
return change_list
def _compare_set(new_set, old_set, change_list, root):
'''
a method for list changes made to a set
:param new_set: set with new values
:param old_set: set with old values
:param change_list: list of differences between old and new
:patam root: string with record of path to the root of the main object
:return: list of differences between old and new
'''
from copy import deepcopy
path = deepcopy(root)
missing_items = old_set - new_set
extra_items = new_set - old_set
for item in missing_items:
change_list.append({'action': 'REMOVE', 'key': None, 'value': item, 'path': path})
for item in extra_items:
change_list.append({'action': 'ADD', 'key': None, 'value': item, 'path': path})
return change_list
if __name__ == '__main__':

    # sample records: identical except for dict2.dict.list2[4]['key']
    newRecord = {
        'active': True,
        'id': 'd53iedBwKNcFCJXLEAWHCfCT3zGLCu93rxTG',
        'dT': 1440184621.607344,
        'score': 400,
        'dict1': { 'key': 'value' },
        'list1': [ 'item' ],
        'dict2': {
            'key1': 'string',
            'key2': 2.2,
            'key3': 2,
            'key4': True,
            'dict': {
                'key': 'value',
                'list1': [ 'item' ],
                'list2': [ 'item', 2, 2.2, True, { 'key': 'newValue' } ]
        } },
        'list2': [ 'item', 2, 2.2, True, { 'key': 'value', 'list': [ 2, 2.2, True, 'item' ] } ]
    }
    oldRecord = {
        'active': True,
        'id': 'd53iedBwKNcFCJXLEAWHCfCT3zGLCu93rxTG',
        'dT': 1440184621.607344,
        'score': 400,
        'dict1': { 'key': 'value' },
        'list1': [ 'item' ],
        'dict2': {
            'key1': 'string',
            'key2': 2.2,
            'key3': 2,
            'key4': True,
            'dict': {
                'key': 'value',
                'list1': [ 'item' ],
                'list2': [ 'item', 2, 2.2, True, { 'key': 'oldValue' } ]
        } },
        'list2': [ 'item', 2, 2.2, True, { 'key': 'value', 'list': [ 2, 2.2, True, 'item' ] } ]
    }
    # exercise the private dict walker directly, then the public entry point;
    # the single difference should surface as one UPDATE ending in 'key'
    test_comparison = _compare_dict(newRecord, oldRecord, [], [])
    assert test_comparison[0]['path'][4] == 'key'
    print(test_comparison)
    test_comparison = compare_records(newRecord, oldRecord)
    print(test_comparison)
"repo_name": "collectiveacuity/labPack",
"path": "labpack/parsing/comparison.py",
"copies": "1",
"size": "7294",
"license": "mit",
"hash": -2678590979965319000,
"line_mean": 37.8031914894,
"line_max": 95,
"alpha_frac": 0.5681381958,
"autogenerated": false,
"ratio": 3.438943894389439,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45070820901894393,
"avg_score": null,
"num_lines": null
} |
''' a package of methods to handle nginx configurations '''
__author__ = 'rcj1492'
__created__ = '2017.06'
__license__ = 'MIT'
def compile_nginx(server_list, http_port=80, ssl_port=None, ssl_gateway=''):

    '''
        a method to compile nginx configurations for forwarding to containers

    :param server_list: list of dictionaries with domain, port and default keys
    :param http_port: integer with port of incoming http requests
    :param ssl_port: integer with port of incoming ssl requests
    :param ssl_gateway: string with name of ssl gateway connecting nginx to internet
    :return: string with nginx config text
    '''

    # http://nginx.org/en/docs/http/server_names.html
    # https://www.linode.com/docs/websites/nginx/how-to-configure-nginx/

    # validate inputs
    if ssl_port:
        if not isinstance(ssl_port, int):
            raise ValueError('compile_nginx(ssl_port=%s) must be an integer.' % str(ssl_port))
        if not ssl_gateway:
            raise ValueError('compile_nginx(ssl_port=%s) requires an ssl_gateway argument.' % str(ssl_port))
    supported_gateways = ['elb', 'certbot']
    if ssl_gateway and not ssl_gateway in supported_gateways:
        from labpack.parsing.grammar import join_words
        gateway_text = join_words(supported_gateways, operator='disjunction')
        raise ValueError('compile_nginx(ssl_gateway=%s) must be either %s.' % (ssl_gateway, gateway_text))

    # determine http port destinations
    # raw string avoids the invalid escape-sequence warning for \. on python 3.12+
    import re
    domain_pattern = re.compile(r'^[^\.]*?\.[^\.]*?$')
    domain_list = []
    default_server = {}
    for server in server_list:
        # only bare two-segment domains (eg. example.com) get www/ssl redirects
        if domain_pattern.match(server['domain']):
            domain_list.append(server)
        if 'default' in server.keys():
            default_server = server

    # determine localhost port (fall back to the first bare domain if no default)
    default_port = ''
    default_domain = ''
    if default_server:
        default_port = default_server['port']
        default_domain = default_server['domain']
    else:
        for server in domain_list:
            default_port = server['port']
            default_domain = server['domain']
            break

    # construct default nginx insert
    nginx_insert = ''
    proxy_headers = ''
    ssl_map = {}

    # determine proxy headers and health check localhost address for elb gateway
    if ssl_gateway == 'elb':
        proxy_headers = 'proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header Host $http_host; '
        if default_port:
            nginx_insert += 'server { listen %s; server_name localhost; location / { proxy_pass http://localhost:%s; } } ' % (http_port, default_port)

    # determine ssl fields for certbot gateway
    elif ssl_gateway == 'certbot':
        for server in domain_list:
            ssl_insert = 'ssl_certificate "/etc/letsencrypt/live/%s/fullchain.pem";' % server['domain']
            ssl_insert += ' ssl_certificate_key "/etc/letsencrypt/live/%s/privkey.pem";' % server['domain']
            ssl_insert += ' ssl_session_cache shared:SSL:1m;'
            ssl_insert += ' ssl_session_timeout 10m;'
            ssl_insert += ' ssl_protocols TLSv1.2;'
            ssl_insert += ' ssl_ciphers "ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS";'
            ssl_insert += ' ssl_prefer_server_ciphers on;'
            ssl_map[server['domain']] = ssl_insert

    # construct ssl only properties
    if ssl_port and ssl_gateway:
        # redirect bare-domain http traffic to https
        for server in domain_list:
            nginx_insert += 'server { listen %s; server_name .%s; rewrite ^ https://$server_name$request_uri? permanent; } ' % (http_port, server['domain'])
        if default_port:
            nginx_insert += 'server { listen %s default_server; location / { proxy_pass http://localhost:%s; %s} } ' % (http_port, default_port, proxy_headers)
        # add ssl for each server subdomain
        ssl_listener = ''
        for server in server_list:
            server_details = ''
            if ssl_gateway == 'elb':
                server_details = 'server { listen %s; server_name %s; location / { proxy_pass http://localhost:%s; %s} }' % (ssl_port, server['domain'], server['port'], proxy_headers)
            elif ssl_gateway == 'certbot':
                for key, value in ssl_map.items():
                    domain_index = server['domain'].rfind(key)
                    # NOTE(review): both branches below emit the same server block;
                    # kept intact to preserve the original matching behavior
                    if domain_index > 0:
                        if server['domain'][:domain_index] + key == server['domain']:
                            server_details = 'server { listen %s ssl; server_name %s; %s location / { proxy_pass http://localhost:%s; } }' % (ssl_port, server['domain'], value, server['port'])
                    elif domain_index == 0:
                        server_details = 'server { listen %s ssl; server_name %s; %s location / { proxy_pass http://localhost:%s; } }' % (ssl_port, server['domain'], value, server['port'])
            if ssl_listener:
                ssl_listener += ' '
            ssl_listener += server_details
        # add redirection for all other subdomains
        for server in domain_list:
            if ssl_gateway == 'elb':
                ssl_listener += ' server { listen %s; server_name www.%s; return 301 https://%s; }' % (ssl_port, server['domain'], server['domain'])
        if default_domain and ssl_gateway == 'elb':
            ssl_listener += ' server { listen %s default_server; rewrite ^ https://%s permanent; }' % (ssl_port, default_domain)
        nginx_insert += ssl_listener

    # construct http properties
    else:
        open_listener = ''
        for server in server_list:
            server_details = 'server { listen %s; server_name %s; location / { proxy_pass http://localhost:%s; %s} }' % (http_port, server['domain'], server['port'], proxy_headers)
            if open_listener:
                open_listener += ' '
            open_listener += server_details
        # redirect www and wildcard subdomains of bare domains
        for server in domain_list:
            open_listener += ' server { listen %s; server_name www.%s; return 301 http://%s; }' % (http_port, server['domain'], server['domain'])
            open_listener += ' server { listen %s; server_name *.%s; rewrite ^ http://%s permanent; }' % (http_port, server['domain'], server['domain'])
        if default_port:
            open_listener += ' server { listen %s default_server; location / { proxy_pass http://localhost:%s; %s} }' % (http_port, default_port, proxy_headers)
        nginx_insert += open_listener

    # construct nginx properties
    nginx_text = 'user nginx; worker_processes auto; events { worker_connections 1024; } pid /var/run/nginx.pid; http { %s }' % nginx_insert

    return nginx_text
def extract_servers(nginx_text):

    '''
        a method to extract server properties from nginx configuration text

    :param nginx_text: string with nginx config text produced by compile_nginx
    :return: list of dictionaries with domain, port and (optional) default keys
    '''

    server_list = []

    # define regex (raw strings avoid invalid escape-sequence warnings on python 3.12+)
    import re
    server_regex = re.compile(r'server\s\{.*?(?=server\s\{|$)', re.S)
    domain_regex = re.compile(r'server_name\s(.*?);.*?\slocation\s/\s\{\sproxy_pass\shttp://localhost:(\d+);', re.S)
    default_open = re.compile(r'default_server;\slocation\s/\s\{\sproxy_pass\shttp://localhost:(\d+);', re.S)
    default_ssl = re.compile(r'default_server;\srewrite\s\^\shttps://(.*?)\spermanent;', re.S)

    # search each server block for domain forwarding and default markers
    server_search = server_regex.findall(nginx_text)
    default_port = 0
    default_domain = ''
    if server_search:
        for server_text in server_search:
            domain_search = domain_regex.findall(server_text)
            if domain_search:
                for match in domain_search:
                    name = match[0]
                    port = int(match[1])
                    # localhost entries are health-check shims, not real servers
                    if name != 'localhost':
                        server_list.append({'domain': name, 'port': port})
            open_search = default_open.findall(server_text)
            if open_search:
                default_port = int(open_search[0])
            ssl_search = default_ssl.findall(server_text)
            if ssl_search:
                default_domain = ssl_search[0]

    # flag the server matching the default port or default domain
    for server in server_list:
        if server['port'] == default_port:
            server['default'] = True
        elif server['domain'] == default_domain:
            server['default'] = True

    return server_list
if __name__ == '__main__':

    # test open domains
    container_list = [ { 'domain': 'collectiveacuity.com', 'port': 5000 } ]
    nginx_text = compile_nginx(container_list)
    assert nginx_text.find('listen 80; server_name collectiveacuity.com;') > -1
    new_list = extract_servers(nginx_text)
    assert len(new_list) == len(container_list)
    assert new_list[0]['default']

    # test ssl with elb
    nginx_text = compile_nginx(container_list, ssl_port=443, ssl_gateway='elb')
    assert nginx_text.find('rewrite ^ https://$server_name$request_uri?') > -1

    # test multiple domains on elb
    container_list.append({'domain': 'api.collectiveacuity.com', 'port': 5001})
    nginx_text = compile_nginx(container_list, ssl_port=443, ssl_gateway='elb')
    nginx_readable = nginx_text.replace(';', ';\n').replace('}', '}\n').replace('{', '{\n')
    print(nginx_readable)
    new_list = extract_servers(nginx_text)
    assert len(new_list) == len(container_list)
    assert new_list[0]['default']

    # test multiple domains on certbot
    # (removed a dead read of a local credentials file whose value was
    # immediately overwritten by the next assignment and whose file handle
    # was never closed; it also crashed on machines without that file)
    nginx_text = compile_nginx(container_list, ssl_port=443, ssl_gateway='certbot')
    nginx_readable = nginx_text.replace(';', ';\n').replace('}', '}\n').replace('{', '{\n')
    print(nginx_readable)
    new_list = extract_servers(nginx_text)
    assert len(new_list) == len(container_list)
    assert new_list[0]['default']
    print(nginx_text)
    print(new_list)
| {
"repo_name": "collectiveacuity/pocketLab",
"path": "pocketlab/methods/nginx.py",
"copies": "1",
"size": "10418",
"license": "mit",
"hash": 4503319686365080000,
"line_mean": 49.068627451,
"line_max": 712,
"alpha_frac": 0.6045306201,
"autogenerated": false,
"ratio": 3.6073407202216066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47118713403216067,
"avg_score": null,
"num_lines": null
} |
''' a package of methods to handle node file generation '''
__author__ = 'rcj1492'
__created__ = '2020.12'
__license__ = '©2020 Collective Acuity'
def generate_json(template_path, model_path, fields_map, replacement_map, printer):

    '''
        a method to generate a json file from a model template

        does nothing if a file already exists at template_path

    :param template_path: string with path to save the generated json file
    :param model_path: string with path passed to retrieve_template
    :param fields_map: dictionary with top-level field values to set in the template
    :param replacement_map: dictionary with text replacements passed to replace_text
    :param printer: callable which is sent the path of the file created
    :return: None
    '''

    from os import path

    # only generate the file if it does not already exist
    if not path.exists(template_path):
        import json
        from pocketlab.methods.config import retrieve_template, replace_text
        template_text = retrieve_template(model_path)
        template_json = json.loads(template_text)
        # overwrite top-level fields present in both the template and the map
        for key, value in fields_map.items():
            if key in template_json.keys():
                template_json[key] = value
        template_text = json.dumps(template_json, indent=2)
        template_text = replace_text(template_text, replacement_map=replacement_map)
        # the with statement closes the file; the explicit close was redundant
        with open(template_path, 'wt', encoding='utf-8') as f:
            f.write(template_text)
        printer(template_path)
def generate_package(package_path, package_text, dependency_text, replacement_map, printer):

    '''
        a method to generate a node package.json file with merged dev dependencies

        does nothing if a file already exists at package_path

        NOTE: mutates replacement_map in place, adding '<global-dependencies>'
        and '<local-dependencies>' keys with space-separated dependency names

    :param package_path: string with path to save the generated package.json file
    :param package_text: string with json text of the package template
    :param dependency_text: string with json map of global dev dependencies
    :param replacement_map: dictionary with text replacements passed to replace_text
    :param printer: callable which is sent the path of the file created
    :return: None
    '''

    from os import path

    # only generate the file if it does not already exist
    if not path.exists(package_path):
        import json
        from pocketlab.methods.config import replace_text
        # seed placeholder values consumed by replace_text below
        replacement_map['<global-dependencies>'] = ''
        replacement_map['<local-dependencies>'] = ''
        package_text = replace_text(package_text, replacement_map=replacement_map)
        package_json = json.loads(package_text)
        package_local = package_json['devDependencies']
        package_global = json.loads(dependency_text)
        # collect space-separated names of local and global dependencies
        for key in package_local.keys():
            if replacement_map['<local-dependencies>']:
                replacement_map['<local-dependencies>'] += ' '
            replacement_map['<local-dependencies>'] += key
        for key, value in package_global.items():
            if replacement_map['<global-dependencies>']:
                replacement_map['<global-dependencies>'] += ' '
            replacement_map['<global-dependencies>'] += key
            package_json['devDependencies'][key] = value
        package_text = json.dumps(package_json, indent=2)
        # the with statement closes the file; the explicit close was redundant
        with open(package_path, 'wt', encoding='utf-8') as f:
            f.write(package_text)
        printer(package_path)
"repo_name": "collectiveacuity/pocketLab",
"path": "pocketlab/methods/node.py",
"copies": "1",
"size": "2221",
"license": "mit",
"hash": -1820349281759321600,
"line_mean": 44.3265306122,
"line_max": 92,
"alpha_frac": 0.6301801802,
"autogenerated": false,
"ratio": 4.043715846994536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5173896027194536,
"avg_score": null,
"num_lines": null
} |
''' a package of methods to merge two or more json documents preserving order '''
__author__ = 'rcj1492'
__created__ = '2021.03'
__license__ = '©2021 Collective Acuity'
import json as json_lib
from collections import OrderedDict
def walk_data(target, source):

    '''
        method to recursively walk the parse tree and merge source into target

    :param target: list or dict with data to merge fields into
    :param source: list or dict with fields to add to target
    :return: merged target (dicts are returned as OrderedDict)
    '''

    from copy import deepcopy

    # skip if target and source are different datatypes
    if target.__class__.__name__ != source.__class__.__name__:
        pass

    # handle maps
    elif isinstance(target, dict):
        count = 0
        target = OrderedDict(target)
        for k, v in source.items():
            # insert fields not found in target
            if k not in target.keys():
                target[k] = v
                # rotate later keys to keep the new field at the source's position
                for i, key in enumerate(list(target.keys())):
                    if i >= count and key != k:
                        target.move_to_end(key)
            # else walk down maps and sequences
            elif isinstance(v, list) or isinstance(v, dict):
                target[k] = walk_data(target[k], v)
            count += 1

    # handle sequences, using the first source item as a model for the scope
    elif isinstance(target, list):
        for i in range(len(target)):
            item = target[i]
            if isinstance(item, dict) or isinstance(item, list):
                # guard against an empty source list before indexing source[0]
                if source:
                    target[i] = walk_data(item, deepcopy(source[0]))

    return target
def merge_json(*sources, output=''):

    '''
        method for merging two or more json files

        this method walks the parse tree of json data to merge the fields
        found in subsequent sources into the data structure of the initial source.
        any number of sources can be added to the source args, but only new fields
        from subsequent sources will be added. to overwrite values in the initial
        source instead, it suffices to simply reverse the order of the sources

        PLEASE NOTE: since there is no way to uniquely identify list items between
        two json documents, items are not added to existing lists.

        PLEASE NOTE: however, lists are traversed in order to evaluate keys of
        nested dictionaries using the first item of any subsequent list
        as a model for the scope

        PLEASE NOTE: this method makes no checks to ensure the file path of the
        sources exist nor the folder path to any output

    :param sources: variable-length argument list of strings with paths to json files
    :param output: [optional] string with path to save the combined json data to file
    :return: OrderedDict (or list of OrderedDict) with merged data
    '''

    # import libraries
    from copy import deepcopy

    # define variables
    combined = None

    # open and combine sources
    src = [open(json_path).read() for json_path in sources]
    for text in src:
        data = json_lib.loads(text)
        if not isinstance(data, list) and not isinstance(data, dict):
            raise ValueError('Source documents must be either lists or dictionaries.')
        # identity test so an empty first document still fixes the datatype
        # (a truthiness test let an empty dict/list bypass the datatype check)
        if combined is None:
            combined = deepcopy(data)
        elif combined.__class__.__name__ != data.__class__.__name__:
            raise ValueError('Source documents must be the same top-level datatype.')
        else:
            combined = walk_data(combined, data)

    # save file and return combined data
    if output:
        with open(output, 'w') as f:
            f.write(json_lib.dumps(combined, indent=2))
    return combined
| {
"repo_name": "collectiveacuity/labPack",
"path": "labpack/compilers/json.py",
"copies": "1",
"size": "3592",
"license": "mit",
"hash": 1411518717079732200,
"line_mean": 36.40625,
"line_max": 86,
"alpha_frac": 0.6209969368,
"autogenerated": false,
"ratio": 4.627577319587629,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.574857425638763,
"avg_score": null,
"num_lines": null
} |
''' a package of methods to merge two or more yaml documents preserving order & comments '''
__author__ = 'rcj1492'
__created__ = '2021.03'
__license__ = '©2021 Collective Acuity'
'''
PLEASE NOTE: yaml package requires the ruamel.yaml module.
(all platforms) pip3 install ruamel.yaml
'''
try:
from ruamel.yaml import YAML, version_info as ruamel_yaml_version
from ruamel.yaml.comments import CommentedMap, CommentedSeq, Comment
from ruamel.yaml.tokens import CommentToken
except:
import sys
print('yaml package requires the ruamel.yaml module. try: pip3 install ruamel.yaml')
sys.exit(1)
# add get comments attribute to Commented Objects
def get_comments_map(self, key, default=None):

    ''' method to collect the comment tokens attached to a map key '''

    entry = self.ca.items.get(key)
    if entry is None:
        return default
    collected = []
    # comment slots may hold a single token, a list of tokens or None
    for token in entry:
        if token is None:
            continue
        if isinstance(token, list):
            collected.extend(token)
        else:
            collected.append(token)
    return collected
def get_comments_seq(self, idx, default=None):

    ''' method to collect the comment tokens attached to a sequence index '''

    entry = self.ca.items.get(idx)
    if entry is None:
        return default
    collected = []
    # comment slots may hold a single token, a list of tokens or None
    for token in entry:
        if token is None:
            continue
        if isinstance(token, list):
            collected.extend(token)
        else:
            collected.append(token)
    return collected
# attach the comment accessors to ruamel's container types at import time
setattr(CommentedMap, 'get_comments', get_comments_map)
setattr(CommentedSeq, 'get_comments', get_comments_seq)
def walk_data(target, source):

    '''
        method to recursively walk the parse tree and merge source into target

        mutates target in place, copying new fields and comments from source

    :param target: CommentedMap or CommentedSeq with data to merge fields into
    :param source: CommentedMap or CommentedSeq with fields to add to target
    :return: None (target is modified in place)
    '''

    from copy import deepcopy

    # skip if target and source are different datatypes
    if target.__class__.__name__ != source.__class__.__name__:
        pass

    # handle maps
    elif isinstance(target, CommentedMap):
        count = 0
        for k, v in source.items():
            # retrieve comments of field in source
            comments = source.get_comments(k)
            if comments:
                comments = '\n'.join([ comment.value for comment in comments ])
            # insert fields not found in target
            if k not in target.keys():
                target.insert(count, k, v, comments)
            else:
                # add comments from source when missing from target
                if comments:
                    if not target.get_comments(k):
                        target.ca.items[k] = source.ca.items.get(k)
                # walk down maps and sequences
                if isinstance(v, CommentedMap) or isinstance(v, CommentedSeq):
                    walk_data(target[k], v)
            count += 1

    # handle sequences, using the first source item as a model for the scope
    elif isinstance(target, CommentedSeq):
        for idx, item in enumerate(target):
            # walk down maps and sequences
            if isinstance(item, CommentedMap) or isinstance(item, CommentedSeq):
                # guard against an empty source sequence before indexing source[0]
                # (previously source[0] was indexed unconditionally -> IndexError)
                if source:
                    walk_data(item, deepcopy(source[0]))
            # add comments to items found in both target and source
            elif source:
                if item in source:
                    comments = source.ca.items.get(source.index(item))
                    if comments:
                        if not target.get_comments(idx):
                            target.ca.items[idx] = comments
def merge_yaml_strings(*sources, output=''):

    '''
        method for merging two or more yaml strings

        this method walks the parse tree of yaml data to merge the fields
        (and comments) found in subsequent sources into the data structure of the
        initial sources. any number of sources can be added to the source args, but
        only new fields and new comments from subsequent sources will be added. to
        overwrite the values in the initial source, it suffices to simply reverse
        the order of the sources

        PLEASE NOTE: since there is no way to uniquely identify list items between
        two yaml documents, items are not added to existing lists.

        PLEASE NOTE: however, lists are traversed in order to evaluate comments
        and keys of nested dictionaries using the first item of any
        subsequent list as a model for the scope

        PLEASE NOTE: the way that ruamel.yaml keeps track of multi-line comments
        can create odd results for comments which appear at the start
        or the end of lists and dictionaries when new fields and comments
        are added. it is best to restrict comments to the start of lists
        and dictionaries.

    :param sources: variable-length argument list of strings with yaml text
    :param output: [optional] string with type of output: '' [default], io
    :return: string with merged data [or StringIO object]
    '''

    # import libraries
    import re
    from copy import deepcopy
    from ruamel.yaml.compat import StringIO
    from ruamel.yaml.util import load_yaml_guess_indent
    yml = YAML(typ='rt')
    yml.default_flow_style = False

    # define variables
    combined = None
    indent = 2
    seq_indent = 0
    combined_head = None

    # open and combine sources
    for text in sources:
        # result is unused; the call is made only to guess the indentation style
        result, indent, seq_indent = load_yaml_guess_indent(text)
        data = yml.load(text)
        try:
            # collect the comment tokens at the head of the document
            head = [comment.value for comment in data.ca.comment[1]]
        except:
            # NOTE(review): data.ca.comment can be None for a document with no
            # head comments, which lands here and re-raises — confirm whether
            # comment-less sources are actually supported by this path
            if ruamel_yaml_version <= (0, 17, 0):
                raise
            else:
                raise NotImplementedError
        if not isinstance(data, CommentedMap) and not isinstance(data, CommentedSeq):
            raise ValueError('Source documents must be either lists or dictionaries.')
        # NOTE(review): truthiness test means an empty first document is treated
        # as uninitialized and replaced by the next source — confirm intended
        if not combined:
            combined = deepcopy(data)
            combined_head = head
        elif combined.__class__.__name__ != data.__class__.__name__:
            pass
        #     raise ValueError('Source documents must be the same top-level datatype or use rule="overwrite"')
        else:
            walk_data(combined, data)
        # add comments to head of document
        if head:
            for comment in head:
                if comment not in combined_head:
                    combined_head.append(comment)

    # strip comment markers and trailing newlines before re-attaching the head
    head_comments = []
    for comment in combined_head:
        comment = re.sub('^# ?','',comment)
        comment = re.sub('\n$','',comment)
        head_comments.append(comment)
    lines = '\n'.join(head_comments)
    combined.yaml_set_start_comment(lines, indent=0)

    # apply indentation and return string
    stream = StringIO()
    yml.indent(sequence=indent, offset=seq_indent)
    yml.dump(combined, stream)
    if output == 'io':
        return stream
    return stream.getvalue()
def merge_yaml(*sources, output=''):

    '''
        method for merging two or more yaml files

        this method walks the parse tree of yaml data to merge the fields
        (and comments) found in subsequent sources into the data structure of the
        initial sources. any number of sources can be added to the source args, but
        only new fields and new comments from subsequent sources will be added. to
        overwrite the values in the initial source, it suffices to simply reverse
        the order of the sources

        PLEASE NOTE: since there is no way to uniquely identify list items between
        two yaml documents, items are not added to existing lists.

        PLEASE NOTE: however, lists are traversed in order to evaluate comments
        and keys of nested dictionaries using the first item of any
        subsequent list as a model for the scope

        PLEASE NOTE: the way that ruamel.yaml keeps track of multi-line comments
        can create odd results for comments which appear at the start
        or the end of lists and dictionaries when new fields and comments
        are added. it is best to restrict comments to the start of lists
        and dictionaries.

        PLEASE NOTE: this method makes no checks to ensure the file path of the
        sources exist nor the folder path to any output

    :param sources: variable-length argument list of strings with path to yaml files
    :param output: [optional] string with path to save the combined yaml data to file
    :return: string with merged data
    '''

    # open and combine sources
    src = [ open(yaml_path).read() for yaml_path in sources ]
    stream = merge_yaml_strings(*src, output='io')

    # save to file and return combined string
    if output:
        from shutil import copyfileobj
        # the with statement closes the file; the previous explicit close was redundant
        with open(output, 'w') as f:
            # rewind: merge_yaml_strings leaves the stream cursor at the end
            stream.seek(0)
            copyfileobj(stream, f)
    return stream.getvalue()
| {
"repo_name": "collectiveacuity/labPack",
"path": "labpack/compilers/yaml.py",
"copies": "1",
"size": "8945",
"license": "mit",
"hash": 5721571771957876000,
"line_mean": 37.8869565217,
"line_max": 110,
"alpha_frac": 0.6180679785,
"autogenerated": false,
"ratio": 4.579621095750128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004934807678729447,
"num_lines": 230
} |
# A palindromic number reads the same both ways.
# The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
from Problem import Problem
class LargestPalindromeProduct(Problem):
    ''' Project Euler problem 4: find the largest palindrome made from the
        product of two 3-digit numbers (known answer: 906609 = 913 x 993) '''

    def __init__(self):
        # expected answer used by the Problem framework for verification
        self.answer = 906609

    def do(self):
        ''' entry point called by the Problem framework '''
        return self.largest_palindrome_product_3_digit()

    def largest_palindrome_product_3_digit(self):
        ''' return the largest palindromic product of two 3-digit numbers '''
        products = self.sorted_products_3_digits()
        # products are sorted descending, so the first palindrome is the largest
        for product in products:
            if self.is_palindrome(product):
                return product

    def is_palindrome(self, x):
        ''' check if the decimal digits of x read the same both ways '''
        digits = str(x)
        l = len(digits)
        for i in range(l // 2 + 1):
            if digits[i] != digits[l - (1 + i)]:
                return False
        return True

    def sorted_products_3_digits(self):
        ''' return a descending list of the products of two 3-digit numbers

            brute force, but fast enough; factors are restricted to 100-999
            (the problem asks for 3-digit numbers — the previous range(1000)
            also produced products of 1- and 2-digit factors) and j starts at
            i to skip duplicate commutative pairs '''
        sorted_products = []
        for i in range(100, 1000):
            for j in range(i, 1000):
                sorted_products.append(i * j)
        sorted_products.sort(reverse=True)
        return sorted_products
| {
"repo_name": "hperreault/ProjectEuler",
"path": "004_LargestPalindromeProduct.py",
"copies": "1",
"size": "1244",
"license": "mit",
"hash": 4122042784353559600,
"line_mean": 31.7105263158,
"line_max": 88,
"alpha_frac": 0.6202735318,
"autogenerated": false,
"ratio": 3.7552870090634443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48755605408634445,
"avg_score": null,
"num_lines": null
} |
""" a panel that dispaly user information """
from subprocess import Popen
from PyQt5.QtWidgets import QWidget, QMessageBox, QFileDialog
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
from PyQt5 import QtCore, QtGui, QtWidgets
class UserInfo(QWidget):
    def __init__(self, parent=None):
        """ initialize the user info panel and build its widget layout """
        super(UserInfo, self).__init__(parent)
        # id of the displayed user; None at construction (set later by callers — TODO confirm)
        self.user_id = None
        self.setupUi(self)
def setupUi(self, QWidget):
QWidget.setObjectName("QWidget")
QWidget.resize(576, 646)
self.verticalLayout = QtWidgets.QVBoxLayout(QWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.widget_3 = QtWidgets.QWidget(QWidget)
self.widget_3.setMinimumSize(QtCore.QSize(0, 100))
self.widget_3.setObjectName("widget_3")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_3)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.widget_3)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.widget_4 = QtWidgets.QWidget(self.widget_3)
self.widget_4.setObjectName("widget_4")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_4)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton_2 = QtWidgets.QPushButton(self.widget_4)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout_2.addWidget(self.pushButton_2)
self.pushButton = QtWidgets.QPushButton(self.widget_4)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout_2.addWidget(self.pushButton)
self.horizontalLayout.addWidget(self.widget_4)
self.verticalLayout.addWidget(self.widget_3)
self.widget_7 = QtWidgets.QWidget(QWidget)
self.widget_7.setMinimumSize(QtCore.QSize(0, 50))
self.widget_7.setObjectName("widget_7")
self.formLayout = QtWidgets.QFormLayout(self.widget_7)
self.formLayout.setObjectName("formLayout")
self.label_2 = QtWidgets.QLabel(self.widget_7)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.lineEdit = QtWidgets.QLineEdit(self.widget_7)
self.lineEdit.setReadOnly(True)
self.lineEdit.setObjectName("lineEdit")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
self.label_3 = QtWidgets.QLabel(self.widget_7)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.lineEdit_2 = QtWidgets.QLineEdit(self.widget_7)
self.lineEdit_2.setReadOnly(True)
self.lineEdit_2.setObjectName("lineEdit_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
self.verticalLayout.addWidget(self.widget_7)
self.widget = QtWidgets.QWidget(QWidget)
self.widget.setMinimumSize(QtCore.QSize(0, 50))
self.widget.setObjectName("widget")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.widget_2 = QtWidgets.QWidget(self.widget)
self.widget_2.setObjectName("widget_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_2)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.checkBox = QtWidgets.QCheckBox(self.widget_2)
self.checkBox.setObjectName("checkBox")
self.verticalLayout_3.addWidget(self.checkBox)
self.checkBox_2 = QtWidgets.QCheckBox(self.widget_2)
self.checkBox_2.setObjectName("checkBox_2")
self.verticalLayout_3.addWidget(self.checkBox_2)
self.horizontalLayout_3.addWidget(self.widget_2)
self.widget_6 = QtWidgets.QWidget(self.widget)
self.widget_6.setObjectName("widget_6")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget_6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.checkBox_3 = QtWidgets.QCheckBox(self.widget_6)
self.checkBox_3.setObjectName("checkBox_3")
self.verticalLayout_2.addWidget(self.checkBox_3)
self.checkBox_4 = QtWidgets.QCheckBox(self.widget_6)
self.checkBox_4.setObjectName("checkBox_4")
self.verticalLayout_2.addWidget(self.checkBox_4)
self.horizontalLayout_3.addWidget(self.widget_6)
self.widget_5 = QtWidgets.QWidget(self.widget)
self.widget_5.setObjectName("widget_5")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.widget_5)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.checkBox_5 = QtWidgets.QCheckBox(self.widget_5)
self.checkBox_5.setObjectName("checkBox_5")
self.verticalLayout_4.addWidget(self.checkBox_5)
self.checkBox_6 = QtWidgets.QCheckBox(self.widget_5)
self.checkBox_6.setObjectName("checkBox_6")
self.verticalLayout_4.addWidget(self.checkBox_6)
self.horizontalLayout_3.addWidget(self.widget_5)
self.verticalLayout.addWidget(self.widget)
self.tableWidget = QtWidgets.QTableWidget(QWidget)
self.tableWidget.setMinimumSize(QtCore.QSize(200, 200))
self.tableWidget.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.tableWidget.setTextElideMode(QtCore.Qt.ElideLeft)
self.tableWidget.setCornerButtonEnabled(True)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(5)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)
self.tableWidget.horizontalHeader().setMinimumSectionSize(45)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.verticalHeader().setCascadingSectionResizes(True)
self.verticalLayout.addWidget(self.tableWidget)
self.label.setFixedSize(100, 100)
self.label.setScaledContents(True)
self.lineEdit.setMinimumWidth(400)
self.lineEdit_2.setMinimumWidth(400)
self.retranslateUi(QWidget)
QtCore.QMetaObject.connectSlotsByName(QWidget)
def retranslateUi(self, QWidget):
_translate = QtCore.QCoreApplication.translate
QWidget.setWindowTitle(_translate("QWidget", "QWidget"))
self.label.setText(_translate("QWidget", "TextLabel"))
self.pushButton_2.setText(_translate("QWidget", "View Resume"))
self.pushButton.setText(_translate("QWidget", "View Addition Info"))
self.label_2.setText(_translate("QWidget", "Email:"))
self.label_3.setText(_translate("QWidget", "Adress:"))
self.checkBox.setText(_translate("QWidget", "IOS"))
self.checkBox_2.setText(_translate("QWidget", "Java"))
self.checkBox_3.setText(_translate("QWidget", "Android"))
self.checkBox_4.setText(_translate("QWidget", "Python"))
self.checkBox_5.setText(_translate("QWidget", "DesktopApp"))
self.checkBox_6.setText(_translate("QWidget", "CPP"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("QWidget", "Project ID"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("QWidget", "Project Rating"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("QWidget", "Reviewer"))
item = self.tableWidget.horizontalHeaderItem(3)
item.setText(_translate("QWidget", "Project Review"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("QWidget", "Project Detail"))
# connect button
self.pushButton_2.clicked.connect(self.viewresume)
def setpic(self, user_id):
self.user_id = user_id
""" set pic to the path image"""
pixmap = QPixmap("../resources/pictures/" + self.user_id)
# when path not found
if (pixmap.isNull()):
pixmap = QPixmap("../img/unknown-user.png")
# scaled and set
pixmap.scaled(60, 60, Qt.KeepAspectRatio)
self.label.setPixmap(pixmap)
def viewresume(self):
download = QFileDialog.getSaveFileName(self.mainWindow, "Download Project", "", "pdf (*.pdf)")
try:
copyfile("../resources/resumes/" + self.user_id + ".pdf", download[0])
except TypeError:
QMessageBox.about(self, "Error", "This user not yet has a resume")
| {
"repo_name": "whuang001/cts",
"path": "gui/UserInfo.py",
"copies": "1",
"size": "9137",
"license": "mit",
"hash": -6596318253798461000,
"line_mean": 49.2032967033,
"line_max": 102,
"alpha_frac": 0.6919120061,
"autogenerated": false,
"ratio": 3.9332759362892813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005034411766138973,
"num_lines": 182
} |
""" a panflute filter to find citations in markdown,
with attributes and prefixes,
- extract the attributes, classes and prefix
- wrap the cite with a Span with class 'attribute-Cite',
- add the classes, attributes and prefix to the Span, and
- remove the attribute string and prefix
For example:
```
+@label {}.class-name a=1} xyz *@label2* @label3{ .b}
```
would be converted to this html:
```html
<p>
<span class="class-name attribute-Cite" data-a="1" data-prefix="+">
<span class="citation" data-cites="label">@label</span>
</span>
xyz
<em><span class="citation" data-cites="label2">@label2</span></em>
<span class="b attribute-Cite" data-prefix="{">
<span class="citation" data-cites="label3">@label3</span>
</span>
</p>
```
Optionally, this can be turned off by adding to the Document metadata
meta["ipub"]["pandoc"]["at_notation"] = False
"""
from panflute import Element, Doc, Cite, RawInline, Link # noqa: F401
import panflute as pf
from ipypublish.filters_pandoc.utils import find_attributes, get_pf_content_attr
from ipypublish.filters_pandoc.definitions import (
ATTRIBUTE_CITE_CLASS,
IPUB_META_ROUTE,
PREFIX_MAP,
)
def process_citations(element, doc):
    # type: (Element, Doc) -> Element
    """Wrap each Cite that carries a prefix and/or an attribute string in
    a Span with class ATTRIBUTE_CITE_CLASS, moving the extracted classes,
    attributes and prefix onto the Span.

    Returns None (element unchanged) when ``at_notation`` is disabled in
    the document metadata or the element has no Cite-bearing content.
    """
    if not doc.get_metadata(IPUB_META_ROUTE + ".at_notation", True):
        return None
    # name of the attribute on `element` that holds its Cite children
    content_attr = get_pf_content_attr(element, pf.Cite)
    if not content_attr:
        return None
    initial_content = getattr(element, content_attr)
    if not initial_content:
        return None
    final_content = []
    skip = 0  # number of upcoming elements already consumed as attributes
    for subel in initial_content:
        if skip:
            skip -= 1
            continue
        if not isinstance(subel, pf.Cite):
            final_content.append(subel)
            continue
        classes = []
        attributes = {}
        append = None
        # check if the cite has a valid prefix, if so extract it
        if (
            isinstance(subel.prev, pf.Str)
            and subel.prev.text
            and (subel.prev.text[-1] in dict(PREFIX_MAP))
        ):
            prefix = subel.prev.text[-1]
            mapping = dict(dict(PREFIX_MAP)[prefix])
            classes.extend(mapping["classes"])
            attributes.update(mapping["attributes"])
            # remove prefix from preceding string
            # NOTE(review): pops the last collected element, assuming it
            # is subel.prev (the Str holding the prefix) — confirm this
            # ordering always holds
            string = final_content.pop()
            if len(string.text) > 1:
                final_content.append(pf.Str(string.text[:-1]))
        # check if the cite has a preceding class/attribute container
        attr_dict = find_attributes(subel, allow_space=True)
        if attr_dict:
            classes.extend(attr_dict["classes"])
            attributes.update(attr_dict["attributes"])
            # elements consumed by find_attributes are skipped on the
            # following loop iterations
            skip = len(attr_dict["elements"])
            append = attr_dict["append"]
        if classes or attributes:
            classes.append(ATTRIBUTE_CITE_CLASS)
            final_content.append(
                pf.Span(subel, classes=sorted(set(classes)), attributes=attributes)
            )
        else:
            final_content.append(subel)
        if append:
            final_content.append(append)
    setattr(element, content_attr, final_content)
    return element
def prepare(doc):
    # type: (Doc) -> None
    """Pre-filter hook required by panflute's run_filter; nothing to do."""
def finalize(doc):
    # type: (Doc) -> None
    """Post-filter hook required by panflute's run_filter; nothing to do."""
def main(doc=None, extract_formats=True):
    # type: (Doc) -> None
    """if extract_formats then convert citations defined in
    latex, rst or html formats to special Span elements

    NOTE(review): ``extract_formats`` is currently unused in this filter;
    it appears to be kept only for signature parity with the sibling
    filter mains — confirm before removing.
    """
    return pf.run_filter(process_citations, prepare, finalize, doc=doc)
if __name__ == "__main__":
main()
| {
"repo_name": "chrisjsewell/ipypublish",
"path": "ipypublish/filters_pandoc/prepare_cites.py",
"copies": "1",
"size": "3600",
"license": "bsd-3-clause",
"hash": -7728732918395981000,
"line_mean": 25.6666666667,
"line_max": 83,
"alpha_frac": 0.6188888889,
"autogenerated": false,
"ratio": 3.6659877800407332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9783978504462322,
"avg_score": 0.0001796328956822784,
"num_lines": 135
} |
""" a panflute filter to find raw elements
and convert them to format agnostic Span elements
"""
import re
from typing import Union # noqa: F401
from panflute import Element, Doc, Cite, RawInline, Link # noqa: F401
import panflute as pf
from ipypublish.filters_pandoc.definitions import (
ATTRIBUTE_CITE_CLASS,
PREFIX_MAP,
PREFIX_MAP_LATEX_R,
PREFIX_MAP_RST_R,
RST_KNOWN_ROLES,
RAWSPAN_CLASS,
RAWDIV_CLASS,
CONVERTED_CITE_CLASS,
CONVERTED_OTHER_CLASS,
CONVERTED_DIRECTIVE_CLASS,
)
from ipypublish.filters_pandoc.utils import get_panflute_containers, get_pf_content_attr
def create_cite_span(identifiers, rawformat, is_block, prefix="", alt=None):
    """Build a Cite wrapped in a Span carrying the prefix's classes and
    attributes; wrap in a Plain when used in a block context."""
    mapping = dict(dict(PREFIX_MAP)[prefix])
    attributes = dict(mapping["attributes"])
    attributes["raw-format"] = rawformat
    if alt is not None:
        attributes["alt"] = str(alt)
    classes = list(mapping["classes"]) + [
        RAWSPAN_CLASS,
        CONVERTED_CITE_CLASS,
        ATTRIBUTE_CITE_CLASS,
    ]
    inner = Cite(citations=[pf.Citation(name) for name in identifiers])
    span = pf.Span(inner, classes=classes, attributes=attributes)
    return pf.Plain(span) if is_block else span
def process_internal_links(link, doc):
    # type: (Link, Doc) -> Element
    """Convert links to internal anchors, e.g. ``[text](#label)``,
    into cite spans (treated like a latex ``cref``)."""
    if not isinstance(link, pf.Link):
        return None
    anchor = re.match(r"#(.+)$", link.url)
    if anchor is None:
        return None
    alt_text = pf.stringify(pf.Plain(*list(link.content))).strip()
    return create_cite_span(
        [anchor.group(1)],
        "markdown",
        False,
        prefix=dict(PREFIX_MAP_LATEX_R).get("cref"),
        alt=alt_text,
    )
def process_html_cites(container, doc):
    # type: (pf.Block, Doc) -> Element
    """extract raw html <cite data-cite="cite_key">text</cite>

    Scans the container's raw-HTML children; each matched open tag and
    everything up to its closing ``</cite>`` is replaced by a cite span.
    Returns None when the container has no raw content to process.
    """
    # if not (isinstance(block, get_panflute_containers(pf.RawInline))
    # or isinstance(block, get_panflute_containers(pf.RawBlock))):
    # return None
    content_attr = get_pf_content_attr(container, pf.RawInline)
    if not content_attr:
        content_attr = get_pf_content_attr(container, pf.RawBlock)
    if not content_attr:
        return None
    initial_content = getattr(container, content_attr)
    if not initial_content:
        return None
    new_content = []
    skip = 0  # elements already consumed as part of a converted <cite>
    for element in initial_content:
        if skip > 0:
            skip = skip - 1
            continue
        if not (
            isinstance(element, (pf.RawInline, pf.RawBlock))
            and element.format in ("html", "html4", "html5")
        ):
            new_content.append(element)
            continue
        match = re.match(r"<cite\s*data-cite\s*=\"?([^>\"]*)\"?>", element.text)
        if not match:
            new_content.append(element)
            continue
        # look for the closing tag
        span_content = []
        closing = element.next
        while closing:
            if isinstance(closing, pf.RawInline) and closing.format in (
                "html",
                "html5",
            ):
                endmatch = re.match(r"^\s*</cite>\s*$", closing.text)
                if endmatch:
                    break
            span_content.append(closing)
            closing = closing.next
        # no closing tag found: leave the raw element untouched
        if not closing:
            new_content.append(element)
            continue
        # TODO include original content
        new_content.append(
            create_cite_span([match.group(1)], "html", isinstance(element, pf.RawBlock))
        )
        # skip the inner content plus the closing </cite> itself
        skip = len(span_content) + 1
    setattr(container, content_attr, new_content)
    return container
def process_latex_raw(element, doc):
    # type: (Union[pf.RawInline, pf.RawBlock], pf.Doc) -> pf.Element
    """extract all latex adhering to \\tag{content} or \\tag[options]{content}
    to a Span element with class RAWSPAN_CLASS attributes:
    ::
        attributes={"format": "latex",
                    "tag": tag, "content": content, "options": options}
    - Cref, cref, ref, and cite will also have class CONVERTED_CITE_CLASS
    - everything else will also have class CONVERTED_OTHER_CLASS
    """
    is_raw_latex = isinstance(element, (pf.RawInline, pf.RawBlock)) and (
        element.format in ("tex", "latex")
    )
    if not is_raw_latex:
        return None
    return assess_latex(element.text, isinstance(element, pf.RawBlock))
def process_latex_str(block, doc):
    # type: (pf.Block, Doc) -> Union[pf.Block,None]
    """see process_latex_raw

    same but sometimes pandoc doesn't convert to a raw element
    """
    # TODO why is pandoc sometimes converting latex tags to Str?
    # >> echo "\cite{a}" | pandoc -f markdown -t json
    # {"blocks":[{"t":"Para","c":[{"t":"RawInline","c":["tex","\\cite{a}"]}]}],"pandoc-api-version":[1,17,5,4],"meta":{}}
    content_attr = get_pf_content_attr(block, pf.Str)
    if not content_attr:
        return None
    initial_content = getattr(block, content_attr)
    if not initial_content:
        return None
    new_content = []
    for element in initial_content:
        if not isinstance(element, pf.Str):
            new_content.append(element)
            continue
        # split on \tag{...} / \tag[...]{...} occurrences; the capture
        # group keeps the matched delimiters in the split result
        for string in re.split(
            r"(\\[^\{\[]+\{[^\}]+\}|\\[^\{\[]+\[[^\]]*\]\{[^\}]+\})", element.text
        ):
            if not string:
                continue
            new_element = assess_latex(string, False)
            if new_element is None:
                new_content.append(pf.Str(string))
            else:
                # FIX: reuse the already-computed element; the original
                # redundantly called assess_latex(string, False) a
                # second time here
                new_content.append(new_element)
    setattr(block, content_attr, new_content)
    return block
def assess_latex(text, is_block):
    """Test whether ``text`` is a latex command of the form
    ``\\tag{content}`` or ``\\tag[options]{content}``.

    Known reference tags become cite spans; anything else becomes a
    panflute.Span with attributes:

    - format: "latex"
    - tag: <tag>
    - options: <options> (only for the bracketed form)
    - content: <content>
    - original: <full text>

    Returns None when the text does not match either form.
    """
    # TODO these regexes do not match labels containing nested {} braces
    # use recursive regexes (https://stackoverflow.com/a/26386070/5033292)
    # with https://pypi.org/project/regex/

    def _wrap(span):
        # block contexts need the Span hosted inside a Plain
        return pf.Plain(span) if is_block else span

    # tags with no option, i.e. \tag{label}
    noopts = re.match(r"^\s*\\([^\{\[]+)\{([^\}]+)\}\s*$", text)
    if noopts:
        tag, content = noopts.group(1), noopts.group(2)
        if tag in dict(PREFIX_MAP_LATEX_R):
            return create_cite_span(
                content.split(","),
                "latex",
                is_block,
                prefix=dict(PREFIX_MAP_LATEX_R).get(tag, ""),
            )
        return _wrap(
            pf.Span(
                classes=[RAWSPAN_CLASS, CONVERTED_OTHER_CLASS],
                attributes={
                    "format": "latex",
                    "tag": tag,
                    "content": content,
                    "original": text,
                },
            )
        )
    # tags with an option, i.e. \tag[options]{label}
    wopts = re.match(r"^\s*\\([^\{\[]+)\[([^\]]*)\]\{([^\}]+)\}\s*$", text)
    if wopts:
        tag, options, content = wopts.group(1), wopts.group(2), wopts.group(3)
        return _wrap(
            pf.Span(
                classes=[RAWSPAN_CLASS, CONVERTED_OTHER_CLASS],
                attributes={
                    "format": "latex",
                    "tag": tag,
                    "content": content,
                    "options": options,
                    "original": text,
                },
            )
        )
    return None
def process_rst_roles(block, doc):
    # type: (pf.Block, Doc) -> Union[pf.Block,None]
    """extract rst adhering to ``:role:`label```, where role is a known
    to a Cite element with class RAWSPAN_CLASS and CONVERTED_CITE_CLASS
    and attributes:
    ::
        attributes={"format": "rst",
                    "role": tag, "content": content}
    """
    # "a :ref:`label` b" is converted to:
    # (Str(a) Space Str(:ref:) Code(label) Space Str(b))
    # if not (isinstance(block, get_panflute_containers(pf.Str))):
    # return None
    content_attr = get_pf_content_attr(block, pf.Str)
    if not content_attr:
        return None
    initial_content = getattr(block, content_attr)
    if not initial_content:
        return None
    # match_rst_role = re.match(
    # "^\\s*\\:([a-z]+)\\:\\`([^\\`]+)\\`$", element.text)
    new_content = []
    skip_next = False  # True when the following Code element was consumed
    for element in initial_content:
        if skip_next:
            skip_next = False
            continue
        # a role is a Str like ":ref:" immediately followed by a Code
        if not (isinstance(element, pf.Str) and isinstance(element.next, pf.Code)):
            new_content.append(element)
            continue
        if not (
            len(element.text) > 2
            and element.text.startswith(":")
            and element.text.endswith(":")
        ):
            new_content.append(element)
            continue
        role = element.text[1:-1]
        content = element.next.text
        if role in dict(PREFIX_MAP_RST_R):
            # reference-like role -> convert to a cite span
            new_element = create_cite_span(
                content.split(","),
                "rst",
                False,
                prefix=dict(PREFIX_MAP_RST_R).get(role, ""),
            )
            new_content.append(new_element)
            skip_next = True
        elif role in RST_KNOWN_ROLES:
            # other known role -> keep as a raw-span marker for later filters
            new_element = pf.Span(
                classes=[RAWSPAN_CLASS, CONVERTED_OTHER_CLASS],
                attributes={
                    "format": "rst",
                    "role": role,
                    "content": content,
                    "original": "{0}`{1}`".format(element.text, element.next.text),
                },
            )
            new_content.append(new_element)
            skip_next = True
        else:
            # unknown role: leave untouched (the Code stays as well)
            new_content.append(element)
    # if len(new_content) != len(block.content):
    # block.content = new_content
    # return block
    setattr(block, content_attr, new_content)
    return block
def gather_processors(element, doc):
    """ we gather the processors,
    so that we don't have to do multiple passes
    """
    # apply processors that change one element
    new_element = process_internal_links(element, doc)
    if new_element is not None:
        return new_element
    new_element = process_latex_raw(element, doc)
    if new_element is not None:
        return new_element
    # apply processors that change multiple inline elements in a block
    # BUGFIX: the original tested ``isinstance(pf.Table, pf.DefinitionItem)``,
    # which compares the two classes themselves (always False) and never
    # inspects ``element``; the intent was to also process Table and
    # DefinitionItem containers.
    if isinstance(element, get_panflute_containers(pf.Inline)) or isinstance(
        element, (pf.Table, pf.DefinitionItem)
    ):
        new_element = process_html_cites(element, doc)
        if new_element is not None:
            element = new_element
        new_element = process_latex_str(element, doc)
        if new_element is not None:
            element = new_element
        new_element = process_rst_roles(element, doc)
        if new_element is not None:
            element = new_element
    # apply processors that change multiple block elements
    if isinstance(element, get_panflute_containers(pf.Block)):
        new_element = process_html_cites(element, doc)
        if new_element is not None:
            element = new_element
    return element
def _rst_directive_inline_arg(block):
    """Return the directive's inline argument: the text after the
    directive name up to the first soft break, or "" if there is none.

    (Extracted: the original duplicated this logic verbatim in both the
    with-body and without-body directive branches.)
    """
    if len(block.content) <= 3:
        return ""
    inline_content = []
    for el in block.content[3:]:
        if isinstance(el, pf.SoftBreak):
            break
        inline_content.append(el)
    if not inline_content:
        return ""
    return pf.stringify(pf.Para(*inline_content)).replace("\n", "").strip()


def wrap_rst_directives(doc):
    """search for rst directives and wrap them in divs
    with top line starting ``Str(..)Space()Str(name::)``, above a CodeBlock,
    and rst labels of the form ``Str(..)Space()Str(_name:)``
    """
    final_blocks = []
    skip_next = False  # True when the next block was consumed as a body
    for block in doc.content:
        if skip_next:
            skip_next = False
            continue
        if not isinstance(block, pf.Para):
            final_blocks.append(block)
            continue
        if len(block.content) < 3:
            final_blocks.append(block)
            continue
        if (
            isinstance(block.content[0], pf.Str)
            and block.content[0].text == ".."
            and isinstance(block.content[1], pf.Space)
            and isinstance(block.content[2], pf.Str)
        ):
            if (
                len(block.content) == 3
                and block.content[2].text.startswith("_")
                and block.content[2].text.endswith(":")
            ):
                # the block is an rst label
                new_block = pf.Div(
                    block,
                    classes=[RAWDIV_CLASS, CONVERTED_OTHER_CLASS],
                    attributes={"format": "rst"},
                )
                final_blocks.append(new_block)
                continue
            if block.content[2].text.endswith("::") and isinstance(
                block.next, pf.CodeBlock
            ):
                # the block is a directive with body content
                # TODO at present we allow any directive name
                # the block may contain option directives, e.g. :width:
                skip_next = True
                new_block = pf.Div(
                    block,
                    *pf.convert_text(block.next.text),
                    classes=[RAWDIV_CLASS, CONVERTED_DIRECTIVE_CLASS],
                    attributes={
                        "format": "rst",
                        "directive": block.content[2].text[:-2],
                        "inline": _rst_directive_inline_arg(block),
                        "has_body": True,
                    }
                )
                final_blocks.append(new_block)
                continue
            if block.content[2].text.endswith("::"):
                # the block is a directive without body content
                # TODO at present we allow any directive name
                # the block may contain option directives, e.g. :width:
                new_block = pf.Div(
                    block,
                    classes=[RAWDIV_CLASS, CONVERTED_DIRECTIVE_CLASS],
                    attributes={
                        "format": "rst",
                        "directive": block.content[2].text[:-2],
                        "inline": _rst_directive_inline_arg(block),
                        "has_body": False,
                    },
                )
                final_blocks.append(new_block)
                continue
        final_blocks.append(block)
    doc.content = final_blocks
def prepare(doc):
    # type: (Doc) -> None
    # pre-walk: wrap rst directives/labels in Divs so the element walk
    # sees each one as a single converted block
    wrap_rst_directives(doc)
def finalize(doc):
    # type: (Doc) -> None
    """Post-filter hook required by panflute's run_filter; nothing to do."""
def main(doc=None, extract_formats=True):
    # type: (Doc, bool) -> None
    """if extract_formats then convert citations defined in
    latex, rst or html formats to special Span elements

    NOTE(review): ``extract_formats`` is currently unused in this
    function body — confirm whether it is still needed by callers.
    """
    return pf.run_filter(gather_processors, prepare, finalize, doc=doc)
if __name__ == "__main__":
main()
| {
"repo_name": "chrisjsewell/ipypublish",
"path": "ipypublish/filters_pandoc/prepare_raw.py",
"copies": "1",
"size": "16093",
"license": "bsd-3-clause",
"hash": -2014222299903282000,
"line_mean": 30.0675675676,
"line_max": 121,
"alpha_frac": 0.530230535,
"autogenerated": false,
"ratio": 4.043467336683417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5073697871683417,
"avg_score": null,
"num_lines": null
} |
""" a panflute filter to format Cite elements
The :py:mod:`ipypublish.filters_pandoc.prepare_cites` filter should be run
first to access the functionality below:
"""
from panflute import Element, Doc, Span, Cite # noqa: F401
import panflute as pf
from ipypublish.filters_pandoc.definitions import (
ATTRIBUTE_CITE_CLASS,
CONVERTED_CITE_CLASS,
IPUB_META_ROUTE,
CITE_HTML_NAMES,
)
from ipypublish.filters_pandoc.html_bib import read_bibliography, process_bib_entry
def format_cites(cite, doc):
    # type: (Cite, Doc) -> Element
    """
    originally adapted from:
    `pandoc-xnos <https://github.com/tomduck/pandoc-xnos/>`_

    Render a Cite in the output format's native syntax:

    - latex: ``\\<tag>{ids}`` (one tag per id when the tag disallows lists)
    - rst: ``:<role>:`id``` (with spacing tweaks for the cite role)
    - html: anchor links for document-internal references, formatted
      bibliography entries for known bib keys, and a highlighted span
      for unknown ids

    Returns None (unchanged) for anything else.
    """
    if not isinstance(cite, pf.Cite):
        return None
    # default tags for latex and rst
    cite_tag = doc.get_metadata(IPUB_META_ROUTE + ".reftag", "cite")
    cite_role = "cite"
    html_capitalize = False
    # check is the Cite has a surrounding Span to supply attributed
    span = None
    if isinstance(cite.parent, pf.Span) and ATTRIBUTE_CITE_CLASS in cite.parent.classes:
        span = cite.parent
        cite_tag = span.attributes.get("latex", cite_tag)
        cite_role = span.attributes.get("rst", cite_role)
        html_capitalize = "capital" in span.classes
    # numref needs explicit opt-in via metadata; otherwise fall back to ref
    if cite_role == "numref" and (
        not doc.get_metadata(IPUB_META_ROUTE + ".use_numref", False)
    ):
        cite_role = "ref"
    if doc.format in ("latex", "tex"):
        if cite_tag in ["cite", "cref", "Cref"] or len(cite.citations) == 1:
            # multiple labels are allowed
            return pf.RawInline(
                "\\{0}{{{1}}}".format(
                    cite_tag, ",".join([c.id for c in cite.citations])
                ),
                format="tex",
            )
        else:
            # tags that take a single label: emit one command per id
            tex = ", ".join(
                ["\\{0}{{{1}}}".format(cite_tag, c.id) for c in cite.citations[:-1]]
            ) + " and \\{0}{{{1}}}".format(cite_tag, cite.citations[-1].id)
            return pf.RawInline(tex, format="tex")
    if doc.format == "rst":
        if len(cite.citations) == 1:
            raw = pf.RawInline(
                ":{0}:`{1}`".format(cite_role, cite.citations[0].id), format="rst"
            )
        elif cite_role == "cite":
            raw = pf.RawInline(
                ":{0}:`{1}`".format(
                    cite_role, ",".join([c.id for c in cite.citations])
                ),
                format="rst",
            )
        else:
            raw = pf.RawInline(
                ", ".join(
                    [":{0}:`{1}`".format(cite_role, c.id) for c in cite.citations[:-1]]
                )
                + " and :{0}:`{1}`".format(cite_role, cite.citations[-1].id),
                format="rst",
            )
        # in testing, rst cite roles required space either side
        # to render correctly
        # TODO check if spacing is required for :cite: roles (and others)
        if cite_role == "cite":
            elem = span if span else cite  # type: pf.Inline
            raw = [raw]
            if elem.prev and not isinstance(elem.prev, pf.Space):
                raw.insert(0, pf.Space())
            if elem.next and not isinstance(elem.next, pf.Space):
                raw.append(pf.Space())
        return raw
    if doc.format in ("html", "html5"):
        elements = []
        cites = set()  # formatted bibliography entries
        names = dict()  # prefix -> set of anchor links (e.g. "fig" -> links)
        unknown = set()  # ids with neither a document ref nor a bib entry
        for citation in cite.citations:
            ref = doc.get_metadata("$$references.{}".format(citation.id), False)
            if ref:
                # ref -> e.g. {"type": "Math", "number": 1}
                prefix = dict(CITE_HTML_NAMES).get(ref["type"], ref["type"])
                prefix = prefix.capitalize() if html_capitalize else prefix
                # label = "{} {}".format(prefix, ref["number"])
                # elements.append(pf.RawInline(
                # '<a href="#{0}">{1}</a>'.format(citation.id, label),
                # format=doc.format))
                # found_ref = True
                names.setdefault(prefix, set()).add(
                    '<a href="#{0}">{1}</a>'.format(citation.id, ref["number"])
                )
            elif citation.id in doc.bibdatabase:
                cites.add(process_bib_entry(citation.id, doc.bibdatabase, doc.bibnums))
                # elements.append(pf.RawInline(
                # process_bib_entry(
                # citation.id, doc.bibdatabase, doc.bibnums),
                # format=doc.format))
                # found_ref = True
            else:
                unknown.add(citation.id)
                # elements.append(pf.Cite(citations=[citation]))
        # if found_ref:
        # return elements
        # else:
        # return pf.RawInline(
        # '<span style="background-color:rgba(225, 0, 0, .5)">'
        # # 'No reference found for: {}</span>'.format(
        # '{}</span>'.format(
        # ", ".join([c.id for c in cite.citations])))
        elements = []
        if cites:
            # TODO sort
            # NOTE(review): iterating a set — the join order is not
            # deterministic across runs; confirm the intended sort key
            # (probably the assigned bib number) before fixing
            elements.append(
                pf.RawInline(
                    "<span>[{}]</span>".format(",".join(c for c in cites)),
                    format=doc.format,
                )
            )
        if names:
            # TODO sort
            # NOTE(review): same nondeterministic set-iteration order here
            for prefix, labels in names.items():
                elements.append(
                    pf.RawInline(
                        "<span>{} {}</span>".format(
                            prefix, ",".join(l for l in labels)
                        ),
                        format=doc.format,
                    )
                )
        if unknown:
            # highlight unknown citation ids so they are visible in output
            elements.append(
                pf.RawInline(
                    '<span style="background-color:rgba(225, 0, 0, .5)">'
                    # 'No reference found for: {}</span>'.format(
                    "{}</span>".format(", ".join([l for l in unknown]))
                )
            )
        return elements
def format_span_cites(span, doc):
    # type: (Cite, Doc) -> Element
    """Render a Span marked as a converted cite in the output format's
    native syntax (latex command, rst role, or html <cite> tag).

    Returns None (unchanged) for non-matching elements or formats.
    """
    if not isinstance(span, pf.Span):
        return None
    if CONVERTED_CITE_CLASS not in span.classes:
        return None
    if doc.format in ("latex", "tex"):
        cite_tag = "cite"
        if span.attributes["format"] == "latex":
            cite_tag = span.attributes["tag"]
        # TODO use cref for rst ref/numref
        # BUGFIX: the original passed format="tex" to str.format (where
        # it was silently ignored as an unused kwarg) instead of to
        # pf.RawInline, so the raw element fell back to the default format.
        return pf.RawInline(
            "\\{0}{{{1}}}".format(cite_tag, span.identifier), format="tex"
        )
    if doc.format == "rst":
        cite_role = "cite"
        if span.attributes["format"] == "rst":
            cite_role = span.attributes["role"]
        # TODO use ref for latex ref/cref/Cref
        return [
            pf.RawInline(":{0}:`{1}`".format(cite_role, span.identifier), format="rst")
        ]
    if doc.format in ("html", "html5"):
        # <cite data-cite="cite_key">text</cite>
        return (
            [
                pf.RawInline(
                    '<cite data-cite="{}">'.format(span.identifier), format="html"
                )
            ]
            + list(span.content)
            + [pf.RawInline("</cite>", format="html")]
        )
def prepare(doc):
    # type: (Doc) -> None
    """Initialise per-document bibliography state used by format_cites."""
    doc.bibnums = {}
    doc.bibdatabase = {}
    # only html output resolves bib entries inline; load the database
    # when a bibliography path is configured in the metadata
    if doc.format in ("html", "html5"):
        bib_path = doc.get_metadata("ipub.bibliography", None)
        if bib_path:
            doc.bibdatabase = read_bibliography(bib_path)
def finalize(doc):
    # type: (Doc) -> None
    """Drop the per-document bibliography state created in prepare()."""
    for attr_name in ("bibnums", "bibdatabase"):
        delattr(doc, attr_name)
def strip_cite_spans(span, doc):
    # type: (Span, Doc) -> Element
    """Replace an attribute-cite Span by its children, dropping the wrapper."""
    if not (isinstance(span, pf.Span) and ATTRIBUTE_CITE_CLASS in span.classes):
        return None
    return list(span.content)
def main(doc=None, strip_spans=True):
    # type: (Doc) -> None
    """Run the cite-formatting filters; when ``strip_spans`` is True the
    attribute-cite wrapper Spans are removed after formatting."""
    to_run = [format_cites]
    if strip_spans:
        to_run.append(strip_cite_spans)
    return pf.run_filters(to_run, prepare, finalize, doc=doc)
if __name__ == "__main__":
main()
| {
"repo_name": "chrisjsewell/ipypublish",
"path": "ipypublish/filters_pandoc/format_cite_elements.py",
"copies": "1",
"size": "7980",
"license": "bsd-3-clause",
"hash": -7911466033817192000,
"line_mean": 31.8395061728,
"line_max": 88,
"alpha_frac": 0.4996240602,
"autogenerated": false,
"ratio": 3.691026827012026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9687874091216602,
"avg_score": 0.0005553591990848023,
"num_lines": 243
} |
""" a panflute filter to format Span element
representations of RawInline elements
The :py:mod:`ipypublish.filters_pandoc.prepare_raw` filter should be run
first to access the functionality below:
"""
import itertools
# from textwrap import fill as textwrap
from panflute import Element, Doc, Span # noqa: F401
import panflute as pf
from ipypublish.filters_pandoc.definitions import (
CONVERTED_OTHER_CLASS,
CONVERTED_DIRECTIVE_CLASS,
IPUB_META_ROUTE,
)
def process_raw_spans(container, doc):
    # type: (Span, Doc) -> Element
    """Convert Span/Div wrappers (created by the ``prepare_raw`` filter)
    back into format-specific raw output for the target ``doc.format``.

    Two wrapper classes are handled:

    - ``CONVERTED_OTHER_CLASS``: single raw snippets (Span) or blocks (Div).
    - ``CONVERTED_DIRECTIVE_CLASS``: RST-style directives (Div), re-emitted
      for rst / html / latex targets.
    """
    if not isinstance(container, (pf.Span, pf.Div)):
        return None
    # metadata switch that lets the user drop raw directive content entirely
    hide_raw = doc.get_metadata(IPUB_META_ROUTE + ".hide_raw", False)
    if CONVERTED_OTHER_CLASS in container.classes and isinstance(container, pf.Span):
        if doc.format == "rst" and container.attributes["format"] == "latex":
            # known latex tags are mapped onto equivalent rst constructs
            if container.attributes["tag"] in ["todo"]:
                return pf.Str(
                    "\n\n.. {}:: {}\n\n".format(
                        container.attributes["tag"], container.attributes["content"]
                    )
                )
            if container.attributes["tag"] == "ensuremath":
                return pf.RawInline(
                    ":math:`{}`".format(container.attributes["content"]), format="rst"
                )
        # otherwise replay the originally captured raw text verbatim
        return pf.RawInline(
            container.attributes.get("original"), format=container.attributes["format"]
        )
    if CONVERTED_DIRECTIVE_CLASS in container.classes and isinstance(container, pf.Div):
        # convert the directive head, which will be e.g.
        # Para(Str(..) Space Str(toctree::) SoftBreak Str(:maxdepth:) Space Str(2) SoftBreak Str(:numbered:)) # noqa
        # we need to split on the soft breaks,
        # place them on a new line and re-indent them
        # NOTE(review): ("rst") is a plain string, not a tuple, so this is a
        # substring test ("r", "s", "st", ... would also match) -- probably
        # intended as == "rst" or in ("rst",); confirm before changing.
        if doc.format in ("rst"):
            # split into lines by soft breaks
            header_lines = [
                list(y)
                for x, y in itertools.groupby(
                    container.content[0].content, lambda z: isinstance(z, pf.SoftBreak)
                )
                if not x
            ]
            # wrap each line in a Para and convert block with pandoc
            head_doc = pf.Doc(*[pf.Para(*l) for l in header_lines])
            head_doc.api_version = doc.api_version
            head_str = pf.convert_text(
                head_doc, input_format="panflute", output_format=doc.format
            )
            # remove blank lines and indent
            head_str = head_str.replace("\n\n", "\n    ") + "\n\n"
            head_block = pf.RawBlock(head_str, format=doc.format)
            if len(container.content) == 1:
                # directive with no body -- the head is the whole output
                return head_block
            # split into lines by soft breaks, we use indicators to tell
            # us where to indent in the converted text
            body_blocks = []
            for block in container.content[1:]:
                # "%^*" marks line starts, "?&@" marks soft breaks; both are
                # replaced by indentation after the pandoc round-trip below
                new_elements = [pf.RawInline("%^*", format=doc.format)]
                for el in block.content:
                    if isinstance(el, pf.SoftBreak):
                        new_elements.append(pf.RawInline("?&@", format=doc.format))
                    else:
                        new_elements.append(el)
                block.content = new_elements
                body_blocks.append(block)
            # convert body content with pandoc
            body_doc = pf.Doc(*body_blocks)
            body_doc.api_version = doc.api_version
            body_str = pf.convert_text(
                body_doc, input_format="panflute", output_format=doc.format
            )
            # raise ValueError(body_blocks)
            body_str = body_str.replace("%^*", "    ").replace("?&@", "\n    ")
            # ensure all lines are indented correctly
            # (doesn't occur by default?)
            body_str = (
                "\n".join(
                    [
                        "    " + l.lstrip() if l.strip() else l
                        for l in body_str.splitlines()
                    ]
                )
                + "\n\n"
            )
            body_block = pf.RawBlock(body_str, format=doc.format)
            return [head_block, body_block]
        elif (
            doc.format in ("html", "html5") and container.attributes["format"] == "rst"
        ):
            if hide_raw:
                return []
            # render the directive head inline, soft breaks become <br>
            head_para = pf.Para(
                *[
                    pf.RawInline("<br>" + " " * 4)
                    if isinstance(c, pf.SoftBreak)
                    else c
                    for c in container.content[0].content
                ]
            )
            head_str = pf.convert_text(
                head_para, input_format="panflute", output_format=doc.format
            )
            if len(container.content) > 1:
                body_doc = pf.Doc(*container.content[1:])
                body_doc.api_version = doc.api_version
                body_str = pf.convert_text(
                    body_doc, input_format="panflute", output_format=doc.format
                )
                body_str = (
                    '<p></p><div style="margin-left: 20px">' "{0}</div>"
                ).format(body_str)
            else:
                body_str = ""
            # green-tinted box marking the (unprocessed) directive in html
            return pf.RawBlock(
                '<div {0} style="background-color:rgba(10, 225, 10, .2)">'
                "{1}{2}"
                "</div>".format(
                    container.attributes.get("directive", ""), head_str, body_str
                ),
                format="html",
            )
        elif doc.format in ("tex", "latex") and container.attributes["format"] == "rst":
            if hide_raw:
                return []
            directive = container.attributes.get("directive", "")
            inline = container.attributes.get("inline", "")
            # TODO handle directive with options and/or inline body
            # e.g. .. figure:: path/to/figure
            #      :centre:
            # frame the directive in an mdframed box titled with its name
            box_open = (
                "\\begin{{mdframed}}"
                "[frametitle={{{0}}},frametitlerule=true]".format(directive)
            )
            if inline:
                box_open += "\n\\mdfsubtitle{{{0}}}".format(inline)
            box_close = "\\end{mdframed}"
            if len(container.content) == 1:
                return pf.RawBlock(box_open + box_close, format="tex")
            else:
                return (
                    [pf.RawBlock(box_open, format="tex")]
                    + list(container.content[1:])
                    + [pf.RawBlock(box_close, format="tex")]
                )
        # any other target format: flatten the directive to plain text
        return pf.RawBlock(
            pf.stringify(pf.Doc(*container.content)),
            format=container.attributes["format"],
        )
    if CONVERTED_OTHER_CLASS in container.classes and isinstance(container, pf.Div):
        # raw block of some other origin: emit its stringified content
        return pf.RawBlock(
            pf.stringify(pf.Doc(*container.content)),
            format=container.attributes["format"],
        )
# now unused
# def split_soft_breaks(container,
# indent=4, fmt="rst", indent_first=False,
# pre_content="", post_content="",
# pre_chunk="", post_chunk="",
# linebreak="\n", raw_indent=None):
# """rst conversion doesn't recognise soft breaks as new lines,
# so add them manually and return a list containing the new elements
# """
# content = []
# if pre_content:
# content.append(pf.RawBlock(pre_content, fmt))
# chunks = [list(y) for x, y in itertools.groupby(
# container.content,
# lambda z: isinstance(z, pf.SoftBreak)) if not x]
# for i, chunk in enumerate(chunks):
# if i > 0 or indent_first:
# if raw_indent is not None:
# chunk = [pf.RawInline(raw_indent, fmt)] * indent + chunk
# else:
# chunk = [pf.Space()] * indent + chunk
# if pre_chunk:
# content.append(pf.RawBlock(pre_chunk, fmt))
# content.append(pf.Plain(*chunk))
# content.append(pf.RawBlock(linebreak, fmt))
# if post_chunk:
# content.append(pf.RawBlock(post_chunk, fmt))
# # if isinstance(container, pf.Para):
# # content.append(pf.RawBlock(linebreak, fmt))
# if post_content:
# content.append(pf.RawBlock(post_content, fmt))
# return content
def process_code_latex(code, doc):
    # type: (pf.CodeBlock, Doc) -> Element
    """Frame code blocks in an mdframed environment for latex output."""
    if doc.format not in ("tex", "latex"):
        return None
    if not isinstance(code, pf.CodeBlock):
        return None
    # TODO line wrapping
    begin = pf.RawBlock("\\begin{mdframed}", format=doc.format)
    end = pf.RawBlock("\\end{mdframed}", format=doc.format)
    return [begin, code, end]
def prepare(doc):
    # type: (Doc) -> None
    """No-op hook required by the panflute filter protocol."""
    pass
def finalize(doc):
    # type: (Doc) -> None
    """No-op hook required by the panflute filter protocol."""
    pass
def main(doc=None):
    # type: (Doc) -> None
    """Apply the raw-span and latex code-block filters to the document."""
    actions = [process_raw_spans, process_code_latex]
    return pf.run_filters(actions, prepare, finalize, doc=doc)
if __name__ == "__main__":
    # pandoc filter entry point: JSON AST on stdin, filtered AST on stdout
    main()
| {
"repo_name": "chrisjsewell/ipypublish",
"path": "ipypublish/filters_pandoc/format_raw_spans.py",
"copies": "1",
"size": "9123",
"license": "bsd-3-clause",
"hash": 9211647737611015000,
"line_mean": 33.0410447761,
"line_max": 117,
"alpha_frac": 0.5139756659,
"autogenerated": false,
"ratio": 4.008347978910369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005977685642127712,
"num_lines": 268
} |
""" a panflute filter to override vs-code Markdown Preview Enhanced (MPE)
interpretation of RMarkdown python cells.
MPE expects cells to have braced attributes after code name;
::
```python {cmnd=true}
print("hi")
```
whereas, in RMarkdown, the code name is included in the brace:
::
```{python ipub={'figure': {'caption': 'this is a caption'}}}
print("hi")
```
See https://github.com/shd101wyy/vscode-markdown-preview-enhanced/issues/185
"""
# TODO remove when fixed?
import sys
import re
from panflute import Element, Doc, CodeBlock # noqa: F401
import panflute as pf
def format_code_html(code, doc):
    # type: (CodeBlock, Doc) -> None
    """Rewrite RMarkdown python code-block attributes into the form that
    vs-code Markdown Preview Enhanced expects (html output only).

    Maintains a running cell id in ``doc.last_id`` and prefixes each cell
    with an ``In [n]:`` marker paragraph.
    """
    if not isinstance(code, CodeBlock):
        return None
    if doc.format not in ("html", "html5"):
        return None
    attr = code.attributes.get("data-info", "")
    if "python" not in attr:
        return None
    # build both the space-separated and the JSON-ish attribute encodings
    parsed = "cmd='{}'".format(sys.executable)
    normed = '{{"cmd":"{}"'.format(sys.executable)
    this_id = 1 if doc.last_id is None else doc.last_id + 1
    parsed += " id='{0}'".format(this_id)
    normed += ',"id": "{0}"'.format(this_id)
    match = re.search("\\scontinue=([0-9]+)", attr)
    if match:
        parsed += " continue='{0}'".format(match.group(1))
        normed += ',"continue":"{0}"'.format(match.group(1))
    if "matplotlib=true" in attr:
        parsed += " matplotlib=true"
        normed += ',"matplotlib":"true"'
    normed += "}"
    wrapped = '{{"language":"python","attributes":{0}}}'.format(normed)
    code.attributes["data-info"] = "python {{{0}}}".format(parsed)
    code.attributes["data-parsed-info"] = wrapped
    code.attributes["data-normalized-info"] = wrapped
    doc.last_id = this_id
    return [pf.Para(pf.Str("In [{}]:".format(this_id))), code]
def prepare(doc):
    # type: (Doc) -> None
    """Initialise the running cell-id counter used by format_code_html."""
    doc.last_id = None
def finalize(doc):
    # type: (Doc) -> None
    """Remove the cell-id counter installed by ``prepare``."""
    delattr(doc, "last_id")
def main(doc=None):
    # type: (Doc) -> None
    """Run the MPE code-block rewriting filter over the document."""
    action = format_code_html
    return pf.run_filter(action, prepare, finalize, doc=doc)
if __name__ == "__main__":
    # pandoc filter entry point: JSON AST on stdin, filtered AST on stdout
    main()
| {
"repo_name": "chrisjsewell/ipypublish",
"path": "ipypublish/filters_pandoc/rmarkdown_to_mpe.py",
"copies": "1",
"size": "2324",
"license": "bsd-3-clause",
"hash": -5917130672298677000,
"line_mean": 25.1123595506,
"line_max": 77,
"alpha_frac": 0.5722891566,
"autogenerated": false,
"ratio": 3.3828238719068415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44551130285068413,
"avg_score": null,
"num_lines": null
} |
""" a panflute filter to prepare document labelling in markdown files:
1) Add a ``$$reference`` key to the Document metadata
Then, for each Image, Math and Table found;
2) Extract labels and attributes to the right of Math or Table captions,
in the form; ``{#id .class-name a="an attribute"}``
3) If attributes found, remove them from the document and wrap the associated
Math/Table in a Span/Div with the attributes and an additional class:
``labelled-Math`` or ``labelled-Table``
4) For all labelled Tables, Math and Images,
place in metadata as e.g.
meta["$$references"][label] = {"type": "Math", "number": 1}
For example:
'$$a=1$$ {#a b=$2$}'
would be converted to this html:
.. code-block:: html
<p>
<span id="a" class="labelled-Math" data-b="2">
<span class="math inline"><em>a</em> = 1</span>
</span>
</p>
"""
from panflute import Element, Doc, Table, Inline # noqa: F401
import panflute as pf
from ipypublish.filters_pandoc.utils import (
compare_version,
get_panflute_containers,
find_attributes,
)
# Class names attached to the wrapper Span/Div of a labelled element.
LABELLED_IMAGE_CLASS = "labelled-Image"
LABELLED_MATH_CLASS = "labelled-Math"
LABELLED_TABLE_CLASS = "labelled-Table"
# Reference-type names recorded in the document's "$$references" metadata.
REFTYPE_TABLE = "Table"
REFTYPE_IMAGE = "Image"
REFTYPE_MATH = "Math"
def resolve_tables(element, doc):
    # type: (Table, Doc) -> None
    """Label a Table whose caption ends with an attribute string.

    The attribute tokens are stripped from the caption, the identifier is
    registered in ``doc.metadata["$$references"]`` with a running number,
    and the table is wrapped in a Div carrying the id/classes/attributes.
    """
    if not isinstance(element, (pf.Table)):
        return None
    ref_type = REFTYPE_TABLE
    if not element.caption:
        return None
    attrs = find_attributes(
        element.caption[-1], search_left=True, include_element=True
    )
    if not attrs:
        return None
    # bump the per-type counter and record the reference in the metadata
    doc.refcount[ref_type] += 1
    doc.metadata["$$references"][attrs["id"]] = pf.MetaMap(
        **{"type": pf.MetaString(ref_type), "number": doc.refcount[ref_type]}
    )
    # strip the attribute tokens out of the caption
    consumed = attrs["elements"]
    element.caption = [item for item in element.caption if item not in consumed]
    # wrap the table so downstream filters can find it by class/id
    return pf.Div(
        element,
        classes=["labelled-{}".format(ref_type)] + attrs["classes"],
        attributes=attrs["attributes"],
        identifier=attrs["id"],
    )
def resolve_equations_images(element, doc):
    # type: (Element, Doc) -> None
    """Label Math and Image elements that are followed by attribute tokens.

    Walks the element's children: for each Math/Image with attributes, the
    attribute tokens are removed, the target is wrapped in a labelled Span,
    and its identifier is recorded in ``doc.metadata["$$references"]``.
    """
    # attribute equations in table captions / definition items?
    if not isinstance(element, get_panflute_containers(pf.Math)):
        return None
    if not element.content:
        return None
    to_delete = set()  # attribute tokens to strip from the content
    to_wrap = dict()  # target element -> its parsed attributes
    subel = element.content[0]
    while subel:  # type: Element
        ref_type = None
        if isinstance(subel, pf.Math):
            ref_type = REFTYPE_MATH
        # elif isinstance(subel, pf.Table):
        #     ref_type = "Table"
        elif isinstance(subel, pf.Image):
            ref_type = REFTYPE_IMAGE
        else:
            subel = subel.next
            continue
        if isinstance(subel, pf.Image) and compare_version("1.16", ">="):
            # pandoc >= 1.16 already supports this
            # TODO for pandoc < 1.16 also look for attributes attached,
            # to the image path, as occurs with image references
            # see https://github.com/tomduck/pandoc-fignos/issues/14
            # NOTE(review): this dict has no "classes"/"attributes" keys
            # (they are commented out), yet the wrapping comprehension below
            # reads to_wrap[el]["classes"] -- looks like a latent KeyError
            # for labelled images on this path; confirm against upstream.
            attributes = {
                "id": subel.identifier,
                # "classes": subel.classes,
                # "attributes": subel.attributes,
                "elements": [],
            }
        else:
            attributes = find_attributes(subel)
        if attributes:
            to_wrap[subel] = attributes
            # skip past the attribute tokens just consumed
            for _ in attributes["elements"]:
                subel = subel.next
        if attributes and attributes["id"]:
            # update count
            doc.refcount[ref_type] += 1
            # add to metadata
            doc.metadata["$$references"][attributes["id"]] = pf.MetaMap(
                **{"type": pf.MetaString(ref_type), "number": doc.refcount[ref_type]}
            )
            to_delete.update(attributes["elements"])
        subel = subel.next
    # NOTE(review): ref_type here is whatever the while loop last assigned,
    # so all wrapped elements get the same "labelled-<type>" class even when
    # Math and Image are mixed -- presumably a bug; confirm intent.
    new_content = [
        pf.Span(
            el,
            classes=["labelled-{}".format(ref_type)] + to_wrap[el]["classes"],
            attributes=to_wrap[el]["attributes"],
            identifier=to_wrap[el]["id"],
        )
        if el in to_wrap
        else el
        for el in element.content
        if el not in to_delete
    ]
    # if isinstance(element, pf.Plain):
    #     return pf.Plain(*new_content)
    # else:
    #     return pf.Para(*new_content)
    element.content = new_content
    return element
def prepare(doc):
    # type: (Doc) -> None
    """Initialise per-type reference counters and the metadata ref map."""
    doc.refcount = {key: 0 for key in ("Table", "Image", "Math")}
    doc.metadata["$$references"] = pf.MetaMap()
def finalize(doc):
    # type: (Doc) -> None
    """Drop the reference counter installed by ``prepare``."""
    delattr(doc, "refcount")
def main(doc=None):
    # type: (Doc) -> None
    """Run the table and equation/image labelling filters."""
    actions = [resolve_tables, resolve_equations_images]
    return pf.run_filters(actions, prepare, finalize, doc=doc)
if __name__ == "__main__":
    # pandoc filter entry point: JSON AST on stdin, filtered AST on stdout
    main()
| {
"repo_name": "chrisjsewell/ipypublish",
"path": "ipypublish/filters_pandoc/prepare_labels.py",
"copies": "1",
"size": "5171",
"license": "bsd-3-clause",
"hash": 2912570272974019000,
"line_mean": 26.6524064171,
"line_max": 88,
"alpha_frac": 0.5851866177,
"autogenerated": false,
"ratio": 3.722822174226062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48080087919260617,
"avg_score": null,
"num_lines": null
} |
# A parallelized "find(1)" using the thread module.
# This demonstrates the use of a work queue and worker threads.
# It really does do more stats/sec when using multiple threads,
# although the improvement is only about 20-30 percent.
# (That was 8 years ago. In 2002, on Linux, I can't measure
# a speedup. :-( )
# I'm too lazy to write a command line parser for the full find(1)
# command line syntax, so the predicate it searches for is wired-in,
# see function selector() below. (It currently searches for files with
# world write permission.)
# Usage: parfind.py [-w nworkers] [directory] ...
# Default nworkers is 4
import sys
import getopt
import string
import time
import os
from stat import *
import thread
# Work queue class. Usage:
# wq = WorkQ()
# wq.addwork(func, (arg1, arg2, ...)) # one or more calls
# wq.run(nworkers)
# The work is done when wq.run() completes.
# The function calls executed by the workers may add more work.
# Don't use keyboard interrupts!
class WorkQ:
    """A self-draining work queue for worker threads (Python 2 ``thread``
    module).

    Jobs are (func, args) pairs; a job may add further jobs while running.
    run(nworkers) blocks until the queue is fully drained.
    """

    # Invariants:
    # - busy and work are only modified when mutex is locked
    # - len(work) is the number of jobs ready to be taken
    # - busy is the number of jobs being done
    # - todo is locked iff there is no work and somebody is busy

    def __init__(self):
        # mutex guards self.work/self.busy; todo gates workers waiting for jobs
        self.mutex = thread.allocate()
        self.todo = thread.allocate()
        self.todo.acquire()
        self.work = []
        self.busy = 0

    def addwork(self, func, args):
        """Append one job; wake a waiting worker if the queue was empty."""
        job = (func, args)
        self.mutex.acquire()
        self.work.append(job)
        self.mutex.release()
        if len(self.work) == 1:
            self.todo.release()

    def _getwork(self):
        """Block for the next job; return None when the queue is drained."""
        self.todo.acquire()
        self.mutex.acquire()
        if self.busy == 0 and len(self.work) == 0:
            # queue drained: propagate the wake-up to the other workers
            self.mutex.release()
            self.todo.release()
            return None
        job = self.work[0]
        del self.work[0]
        self.busy = self.busy + 1
        self.mutex.release()
        if len(self.work) > 0:
            self.todo.release()
        return job

    def _donework(self):
        """Mark one job finished; signal drain when nothing is left."""
        self.mutex.acquire()
        self.busy = self.busy - 1
        if self.busy == 0 and len(self.work) == 0:
            self.todo.release()
        self.mutex.release()

    def _worker(self):
        """Worker loop: take jobs until _getwork signals the drain."""
        time.sleep(0.00001)     # Let other threads run
        while 1:
            job = self._getwork()
            if not job:
                break
            func, args = job
            apply(func, args)
            self._donework()

    def run(self, nworkers):
        """Start nworkers-1 helper threads, work in this thread too, and
        return once all jobs (including ones added en route) are done."""
        if not self.work:
            return # Nothing to do
        for i in range(nworkers-1):
            thread.start_new(self._worker, ())
        self._worker()
        self.todo.acquire()
# Main program
def main():
    """Parse ``-w nworkers`` from argv, then crawl the given directories
    (default: the current directory) in parallel and print the elapsed
    time to stderr.  (Python 2 code: uses string.atoi.)"""
    nworkers = 4
    opts, args = getopt.getopt(sys.argv[1:], '-w:')
    for opt, arg in opts:
        if opt == '-w':
            nworkers = string.atoi(arg)
    if not args:
        args = [os.curdir]
    wq = WorkQ()
    # seed the queue with one job per root directory; find() adds the rest
    for dir in args:
        wq.addwork(find, (dir, selector, wq))
    t1 = time.time()
    wq.run(nworkers)
    t2 = time.time()
    sys.stderr.write('Total time %r sec.\n' % (t2-t1))
# The predicate -- defines what files we look for.
# Feel free to change this to suit your purpose
def selector(dir, name, fullname, stat):
    """Predicate: true for world-writable entries that are not symlinks.

    (Python 2 code: 0002 is an old-style octal literal.)
    """
    # Look for world writable files that are not symlinks
    return (stat[ST_MODE] & 0002) != 0 and not S_ISLNK(stat[ST_MODE])
# The find procedure -- calls wq.addwork() for subdirectories
def find(dir, pred, wq):
    """List *dir*, print entries matching *pred*, and queue a new job on
    *wq* for every subdirectory (skipping mount points).

    Errors from listdir/lstat are printed and skipped, not raised.
    """
    try:
        names = os.listdir(dir)
    except os.error, msg:
        print repr(dir), ':', msg
        return
    for name in names:
        if name not in (os.curdir, os.pardir):
            fullname = os.path.join(dir, name)
            try:
                stat = os.lstat(fullname)
            except os.error, msg:
                print repr(fullname), ':', msg
                continue
            if pred(dir, name, fullname, stat):
                print fullname
            # recurse via the work queue so other threads can help
            if S_ISDIR(stat[ST_MODE]):
                if not os.path.ismount(fullname):
                    wq.addwork(find, (fullname, pred, wq))
# Call the main program
# NOTE(review): runs unconditionally on import -- there is no __main__ guard.
main()
| {
"repo_name": "wskplho/sl4a",
"path": "python/src/Demo/threads/find.py",
"copies": "47",
"size": "4215",
"license": "apache-2.0",
"hash": 2872756967697825000,
"line_mean": 26.1935483871,
"line_max": 71,
"alpha_frac": 0.5779359431,
"autogenerated": false,
"ratio": 3.6056458511548333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00018433179723502304,
"num_lines": 155
} |
#A parameter testing script for presentsGame
import presentsGame as pg
import numpy as np
import csv
# Fixed problem size and GA run length for this parameter sweep.
numberOfPresents = 648
sleighDim = 1000
numberOfGenerations = 15000
num_ordered = 0
numberOfGenes = 30
# Output CSV named after the sweep's fixed parameters.
fileOut = 'NumPresents' + str(numberOfPresents) + 'NumGenerations' + str(numberOfGenerations) + '.csv'
with open(fileOut, 'wb') as ofile:
    writer = csv.writer(ofile)
    header=['numberOfGenes','rotateFrac','htBufFrac','newPosFrac','mx_ht_param','crossoverFrac','p','k','numChildren','num_ordered','mean_fitness', 'std_fitness', 'min_fitness', 'max_fitness']
    writer.writerow(header)
# NOTE(review): the code below sits OUTSIDE the `with` block, so `ofile` is
# already closed when writer.writerow is called inside the loops, and the
# trailing ofile.close() is redundant -- this looks like indentation lost in
# transit; confirm against the original script.
dtype=[('PresentID','int64'),('x1','uint16'),('y1','uint16'),('z1','uint32'),('x8','uint16'),('y8','uint16'),('z8','uint32')]
topdown=np.loadtxt('TopDown.csv', dtype=dtype,delimiter=',',skiprows=1, usecols = [0,1,2,3,22,23,24])
dtype=[('PresentID','int64'),('Dimension1','uint16'),('Dimension2','uint16'),('Dimension3','uint16')]
presents=np.loadtxt('presents.csv',dtype=dtype,delimiter=',',skiprows=1)
#runPresentsGA(Presents, TopDownPacking, sleighDim, number_of_presents, number_of_chromosomes,number_of_generations_, rotate_frac, ht_buffer_frac, new_pos_frac, mx_ht_param, crossover_frac, p_, k_, num_children, num_ordered)
#print pg.runPresentsGA(presents, topdown, 1000, 50, 25,50, 0.10, 0.10, 0.10, 4, 0.50, 0.8, 12, 8, 0.1)
#for numberOfGenes in [25,50,100]:
# Grid search over GA hyper-parameters (each list currently holds one value).
for rotateFrac in [0.05]:
    for htBufFrac in [0]:
        for newPosFrac in [0.05]:
            for mx_ht_param in [10000]:
                for crossoverFrac in [0.1]:
                    for p in [0.95]:
                        for k in [5]:
                            for numChildren in [8]:
                                # copy inputs so each GA run starts unmutated
                                topdownbyval = np.copy(topdown)
                                presentsbyval = np.copy(presents)
                                input = [numberOfGenes,rotateFrac,htBufFrac,newPosFrac,mx_ht_param,crossoverFrac,p,k,numChildren,num_ordered]
                                print input
                                result = pg.runPresentsGA(presents, topdownbyval, sleighDim, numberOfPresents, numberOfGenes,numberOfGenerations, rotateFrac, htBufFrac, newPosFrac, mx_ht_param, crossoverFrac, p, k, numChildren, num_ordered)
                                print result
                                writer.writerow(input + result)
ofile.close()
| {
"repo_name": "rhsimplex/santas-sleigh",
"path": "testparameters.py",
"copies": "1",
"size": "2439",
"license": "mit",
"hash": 3194216105770140000,
"line_mean": 52.0217391304,
"line_max": 244,
"alpha_frac": 0.5990159902,
"autogenerated": false,
"ratio": 3.3781163434903045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44771323336903046,
"avg_score": null,
"num_lines": null
} |
"""A parser and instruction generator for transformation rule strings."""
from functools import lru_cache
import re
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from treepace.instructions import (AddNode, AddReference, Find, GoToParent,
GroupEnd, GroupStart, SearchReference, SetRelation)
from treepace.relations import Child, NextSibling, Parent, Sibling
GRAMMAR = Grammar('''
rule = pattern '->' replacement
_ = (' ' / '\t')*
pattern = group (rel_group)*
group = node / (group_start pattern group_end)
rel_group = (relation group) / parent_any
node = any / constant / code / reference
any = _'.'_
constant = _((~r'\w'+) / ('"' (!'"' ~'.')+ '"'))_
code = _'[' python_code ']'_
python_code = expr_part+
expr_part = (!('[' / ']') ~'.')+ / ('[' expr_part ']')
reference = _ '$' reference_num _
reference_num = ~r'\d'+
group_start = _'{'_
group_end = _'}'_
relation = child / sibling / next_sibling
child = _'<'_
sibling = _'&'_
next_sibling = _','_
parent_any = _'>'_
replacement = repl_node (repl_rel_node)*
repl_node = constant / code / reference
repl_rel_node = (repl_relation node) / parent_any
repl_relation = child / next_sibling
''')
class Compiler:
    """Compiles rule, pattern and replacement strings into instruction
    lists for the virtual machine.  All entry points are memoized."""

    @staticmethod
    @lru_cache()
    def compile_pattern(pattern):
        """Compile a pattern string into searching instructions."""
        ast = GRAMMAR['pattern'].parse(pattern)
        return SearchGenerator().visit(ast)

    @staticmethod
    @lru_cache()
    def compile_replacement(replacement):
        """Compile a replacement string into tree-building instructions."""
        return BuildGenerator().visit(GRAMMAR['replacement'].parse(replacement))

    @staticmethod
    @lru_cache()
    def compile_rule(rule):
        """Compile a full rule; return a (search, replace) pair of
        instruction lists."""
        ast = GRAMMAR['rule'].parse(rule)
        searching = SearchGenerator().visit(ast.children[0])
        replacing = BuildGenerator().visit(ast.children[2])
        return (searching, replacing)
class InstructionGenerator(NodeVisitor):
    """Shared machinery for post-order AST visitors that emit virtual
    machine instructions."""

    def __init__(self):
        """Start with an empty program and a zero child-nesting level."""
        self._instructions = []
        self._child_level = 0

    def generic_visit(self, node, visited_children):
        """Nodes without a dedicated visitor are simply passed over."""
        pass

    def visit_child(self, node, visited_children):
        """Emit 'REL child' and note the deeper nesting level."""
        self._add(SetRelation(Child))
        self._child_level += 1

    def visit_next_sibling(self, node, visited_children):
        """Emit 'REL next_sib'."""
        self._add(SetRelation(NextSibling))

    def _add(self, instruction):
        # append one instruction to the program being generated
        self._instructions.append(instruction)

    def _check_child_level(self):
        # guard against more '>' relations than '<' relations
        if self._child_level < 0:
            raise CompileError('Too many parent relations')

    def _text_constant(self, node):
        # strip optional surrounding quotes and quote as a Python literal
        match = re.search('"?([^"]*)"?', node.text.strip())
        return repr(match.group(1))
class SearchGenerator(InstructionGenerator):
    """Generates the instruction list used to search a tree for a pattern."""

    def __init__(self):
        """Add capture-group bookkeeping on top of the base state."""
        super().__init__()
        self._started_group = 0
        self._ended_groups = set()

    def visit_pattern(self, node, visited_children):
        """The pattern node is the AST root -- hand back the program."""
        return self._instructions

    def visit_any(self, node, visited_children):
        """Emit a wildcard match."""
        self._add(Find('True'))

    def visit_constant(self, node, visited_children):
        """Emit a match against a literal node value."""
        predicate = 'str(_) == str(%s)' % self._text_constant(node)
        self._add(Find(predicate))

    def visit_python_code(self, node, visited_children):
        """Emit a match using an arbitrary Python predicate."""
        self._add(Find(node.text))

    def visit_reference_num(self, node, visited_children):
        """Emit a back-reference; only already-closed groups are legal."""
        number = int(node.text)
        if number not in self._ended_groups:
            raise CompileError('Group %d cannot be referenced yet' % number)
        self._add(SearchReference(number))

    def visit_group_start(self, node, visited_children):
        """Open the next numbered capture group."""
        self._started_group += 1
        self._add(GroupStart(self._started_group))

    def visit_group_end(self, node, visited_children):
        """Close the innermost capture group that is still open."""
        still_open = set(range(1, self._started_group + 1)) - self._ended_groups
        innermost = max(still_open)
        self._ended_groups.add(innermost)
        self._add(GroupEnd(innermost))

    def visit_sibling(self, node, visited_children):
        """Emit 'REL sibling'."""
        self._add(SetRelation(Sibling))

    def visit_parent_any(self, node, visited_children):
        """Emit the 'parent' relation plus an implicit wildcard match."""
        self._add(SetRelation(Parent))
        self._add(Find('True'))
        self._child_level -= 1
        self._check_child_level()
class BuildGenerator(InstructionGenerator):
    """Generates the instruction list that builds a replacement tree."""

    def visit_replacement(self, node, visited_children):
        """The replacement node is the AST root -- hand back the program."""
        return self._instructions

    def visit_constant(self, node, visited_children):
        """Emit an instruction appending a constant-valued node."""
        self._add(AddNode(self._text_constant(node)))

    def visit_python_code(self, node, visited_children):
        """Emit an instruction appending a dynamically evaluated node."""
        self._add(AddNode(node.text))

    def visit_reference_num(self, node, visited_children):
        """Emit an instruction appending a captured group by number."""
        self._add(AddReference(int(node.text)))

    def visit_parent_any(self, node, visited_children):
        """Emit an instruction moving up one level in the built tree."""
        self._add(GoToParent())
        self._child_level -= 1
        self._check_child_level()
class CompileError(Exception):
    """Signals a compilation failure that is not a grammar parse error."""
| {
"repo_name": "sulir/treepace",
"path": "treepace/compiler.py",
"copies": "1",
"size": "6904",
"license": "mit",
"hash": -1363488909267531500,
"line_mean": 36.3189189189,
"line_max": 80,
"alpha_frac": 0.614571263,
"autogenerated": false,
"ratio": 4.222629969418961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.533720123241896,
"avg_score": null,
"num_lines": null
} |
""" A parser and other parser related classes. """
import pyparsing
from .models import Command, Input, Output, Grammar
from .models import command_template_factory as ctf
class Parser(object):
    """Parses a metapipe pipeline description into command templates.

    (Python 2 code: uses dict.iteritems.)
    """
    def __init__(self, string):
        # raw pipeline text plus accumulators filled by consume()
        self.string = string
        self.commands = []
        self.paths = []
        self.files = []
    def consume(self, cwd=None):
        """ Converts the lexer tokens into valid statements. This process
        also checks command syntax.

        NOTE(review): the ``cwd`` parameter is never used in this method --
        confirm whether callers rely on it before removing.
        """
        # coarse first pass splits the text into named sections
        first_pass = Grammar.overall.parseString(self.string)
        lowered = { key.lower(): val for key, val in first_pass.iteritems() }
        self.commands = ['\n'.join(self._get('commands', lowered))]
        self.job_options = self._get('job_options', lowered)
        self.global_options = self._get('options', lowered)
        self.files = self._get('files', lowered)
        self.paths = self._get('paths', lowered)
        # second pass: parse each section with its detailed grammar
        self.files = self._parse(self.files, Grammar.file, True)
        self.paths = self._parse(self.paths, Grammar.path, True)
        self.job_options = self._parse(self.job_options, Grammar.line)
        try:
            command_lines = self._parse(self.commands, Grammar.command_lines)[0]
        except IndexError:
            raise ValueError('Did you write any commands?')
        self.commands = []
        for command_line in command_lines:
            comments, command = command_line
            self.commands.append([comments.asList(),
                self._parse([''.join(command)], Grammar.command)])
        self.job_options = [opt.asList() for opt in self.job_options]
        self.paths = ctf.get_paths(self.paths)
        self.files = ctf.get_files(self.files)
        # templates are consumed back-to-front by the factory
        self.paths.reverse()
        self.files.reverse()
        self.commands.reverse()
        return ctf.get_command_templates(self.commands, self.files[:],
                self.paths[:], self.job_options)
    def _get(self, key, parser_result):
        """ Given a type and a dict of parser results, return
        the items as a list.
        """
        try:
            list_data = parser_result[key].asList()
            # a flat list of strings is one logical line; otherwise join each
            if any(isinstance(obj, str) for obj in list_data):
                txt_lines = [''.join(list_data)]
            else:
                txt_lines = [''.join(f) for f in list_data]
        except KeyError:
            txt_lines = []
        return txt_lines
    def _parse(self, lines, grammar, ignore_comments=False):
        """ Given a type and a list, parse it using the more detailed
        parse grammar.
        """
        results = []
        for c in lines:
            # skip blanks and (optionally) '#' comment lines
            if c != '' and not (ignore_comments and c[0] == '#'):
                try:
                    results.append(grammar.parseString(c))
                except pyparsing.ParseException as e:
                    raise ValueError('Invalid syntax. Verify line {} is '
                        'correct.\n{}\n\n{}'.format(e.lineno, c, e))
        return results
| {
"repo_name": "Sonictherocketman/metapipe",
"path": "metapipe/parser.py",
"copies": "2",
"size": "2968",
"license": "mit",
"hash": -5576982518445397000,
"line_mean": 34.3333333333,
"line_max": 80,
"alpha_frac": 0.5717654987,
"autogenerated": false,
"ratio": 4.0491132332878585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010669887337275035,
"num_lines": 84
} |
"""A parser for ASN1 object encoded using BER
The doc string just sketches the names of objects in the module.
Consult the documentation for more details.
Burton S. Kaliski Jr. wrote a helpful introduction to ASN.1 and the
BER encoding titled 'A Layman's Guide to a Subset of ASN.1, BER, and
DER.' It is available from http://www.rsasecurity.com/rsalabs/pkcs/.
The text version is available at
ftp://ftp.rsasecurity.com/pub/pkcs/ascii/layman.asc.
functions:
parse(buf: string) -> ASN1Object
display(obj: ASN1Object)
parseCfg(path) -> {oid:name}
classes:
ASN1Object
plus subclasses for each asn.1 type, e.q. Sequence, Set, etc.
constants:
INTEGER, BIT_STRING, OCTET_STRING, NULL, OBJECT_IDENTIFIER,
SEQUENCE, SET, PrintableString, T61String, IA5String, UTCTIME,
BOOLEAN
The following objects are not part of the user-visible API:
Displayer
ASN1Parser
unparseLengthXXX functions
"""
import string
import struct
import operator
import types
import UserList
import time
try:
import cStringIO
StringIO = cStringIO.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
class EOFError(IOError):
    # NOTE(review): shadows the builtin EOFError inside this module --
    # presumably raised when the parser runs out of input; confirm callers.
    pass
# ASN.1 universal tag numbers as used in BER encodings (see the module
# docstring's reference to the Layman's Guide to ASN.1/BER/DER).
INTEGER = 0x02
BIT_STRING = 0x03
OCTET_STRING = 0x04
NULL = 0x05
OBJECT_IDENTIFIER = 0x06
SEQUENCE = 0x10
SET = 0x11
PrintableString = 0x13
T61String = 0x14
IA5String = 0x16 # IA5 == ASCII
UTCTIME = 0x17
BOOLEAN = 0x01
class Displayer:
    """Callable pretty-printer for ASN1Object trees.

    When constructed with an {oid: info} mapping (e.g. from parseCfg),
    known OIDs are shown by their 'Description' instead of raw form.
    (Python 2 code: print statements, has_key, backtick repr.)
    """
    def __init__(self, oids=None):
        # NOTE(review): self.oids is never set when oids is falsy -- the
        # attribute access below then raises AttributeError, which the
        # except clause swallows into the repr fallback; confirm intent.
        if oids:
            self.oids = oids
    def __call__(self, obj, indent=0):
        try:
            if obj.atomic:
                if self.oids and isinstance(obj, OID) \
                   and self.oids.has_key(obj):
                    info = self.oids[obj]
                    if info.has_key('Warning'):
                        print " " * indent, "OID", info['Description'], \
                              "Warning"
                    else:
                        print " " * indent, "OID", info['Description']
                    return
                print " " * indent, str(obj)
            else:
                if isinstance(obj, Contextual):
                    # context-specific tag: show [n] then the wrapped value
                    print " " * indent, "[%d]"% obj.tag
                    display(obj.val, indent+1)
                else:
                    print " " * indent, obj.__class__.__name__, "{"
                    for elt in obj.val:
                        display(elt, indent+1)
                    print " " * indent, "}"
        except AttributeError:
            # not an ASN1Object (no .atomic): fall back to plain repr
            print " " * indent, `obj`
def display(obj, indent=0):
    """Recursively pretty-print an ASN1Object tree without OID resolution.

    (Python 2 code; see Displayer for the OID-aware variant.)
    """
    # XXX doesn't work on constructed yet
    try:
        if obj.atomic:
            print " " * indent + str(obj)
        else:
            if isinstance(obj, Contextual):
                # context-specific tag: show [n] then the wrapped value
                print " " * indent, "[%d]"% obj.tag
                display(obj.val, indent+1)
            else:
                print " " * indent, obj.__class__.__name__, "{"
                for elt in obj.val:
                    display(elt, indent+1)
                print " " * indent, "}"
    except AttributeError:
        # not an ASN1Object (no .atomic): fall back to plain repr
        print " " * indent, `obj`
class ASN1Object:
    """Base class for parsed ASN.1 values.

    Subclasses keep their payload in ``self.val`` and implement
    ``_encode(io)`` to write their BER encoding to a file-like object.
    (Python 2 code: __cmp__, old-style raise.)
    """
    atomic = 1
    def __init__(self, val):
        self.val = val
    # XXX need to make sure this really works everywhere; it's a late
    # addition. it requires that all objects have a val that is a
    # list.
    def __cmp__(self, other):
        # compare payloads; any non-ASN1Object sorts after us
        if isinstance(other, ASN1Object):
            return cmp(self.val, other.val)
        return -1
    def __repr__(self):
        return "%s:%s" % (self.__class__.__name__, repr(self.val))
    def encode(self, io=None):
        """BER-encode into *io*; with no argument, return the bytes as a
        string via an internal StringIO buffer."""
        if io is None:
            io = StringIO()
            ioFlag = 1
        else:
            ioFlag = 0
        try:
            encode = self._encode
        except AttributeError:
            raise AttributeError, \
                  ("%s instance does not implement _encode" %
                   self.__class__.__name__)
        else:
            encode(io)
        if ioFlag:
            return io.getvalue()
class Sequence(ASN1Object, UserList.UserList):
    """ASN.1 SEQUENCE: an ordered collection of ASN.1 values.

    Elements may be ASN1Object instances or plain (long) integers;
    anything else is rejected at encoding time.
    """
    atomic = 0
    def __init__(self, val=None):
        # A fresh list is created per instance (avoids the shared
        # mutable-default pitfall).
        if not val:
            val = []
        self.val = self.data = val
    def __repr__(self):
        return "SEQUENCE {" + repr(self.val)[1:-1] + "}"
    def _encode(self, io):
        """Write the DER encoding of this SEQUENCE to *io*."""
        encObjs = []
        for elt in self.data:
            _type = type(elt)
            if _type == types.InstanceType:
                encObjs.append(elt.encode())
            elif _type == types.IntType or _type == types.LongType:
                encObjs.append(unparseInteger(elt))
            else:
                # Bug fix: corrected the misspelled error message
                # ("containg" -> "containing") and switched to the
                # portable raise form.
                raise RuntimeError("can't encode sequence containing %s"
                                   % _type)
        io.write(unparseSequence(encObjs))
class Set(ASN1Object, UserList.UserList):
    """ASN.1 SET: an unordered collection of ASN.1 values."""
    atomic = 0
    def __init__(self, val):
        # XXX I don't remember why I only get a single value here
        self.val = self.data = [val]
    def __repr__(self):
        return "SET {" + repr(self.val) + "}"
class UTCTime(ASN1Object):
    """Standard ASN.1 type for time expressed in GMT
    draft-ietf-pkix-ipki-part1-08.txt notes:
    For the purposes of this profile, UTCTime values shall be expressed
    Greenwich Mean Time (Zulu) and shall include seconds (i.e., times are
    YYMMDDHHMMSSZ), even where the number of seconds is zero.  Conforming
    systems shall interpret the year field (YY) as follows:
    Where YY is greater than or equal to 50, the year shall be inter-
    preted as 19YY; and
    Where YY is less than 50, the year shall be interpreted as 20YY.
    """
    def __init__(self, val=None):
        # Accept either a preformatted "YYMMDDHHMMSSZ" string or a Python
        # time value, which is converted by unparse().
        if type(val) == types.StringType:
            self.val = val
            self._val = None    # parsed (seconds) form, computed lazily
        else:
            self.unparse(val)
    def __cmp__(self, other):
        # Compares the string forms; both values must use the same format.
        return cmp(self.val, other.val)
    def _encode(self, io):
        io.write(chr(UTCTIME) + unparseLength(len(self.val)) + self.val)
    def unparse(self, val):
        """Convert a Python time representation to UTC time.
        Argument must be in UTC.
        """
        # Python dates might be represented as seconds or time tuples.
        # I'll simply require that both times have the same repr.
        # UTC is easier to cope with because the user can make sure a
        # time tuple is in UTC, but it would be a pain for me to do that.
        # NOTE(review): time.mktime requires a time-tuple, so if val is a
        # number this line raises TypeError before the seconds-to-tuple
        # conversion below ever runs -- verify against callers.
        self._val = time.mktime(val)
        if type(val) != types.TupleType:
            try:
                val = time.gmtime(val)
            except TypeError:
                raise TypeError, "time must be seconds or time-tuple"
        yy = val[0]
        # Two-digit year per the PKIX profile quoted above: 2000-2049 and
        # 1950-1999 are representable; anything else is rejected.
        if yy >= 2000:
            yy = yy - 2000
            if yy >= 50:
                # man this is braind-dead
                raise ValueError, "can't handle data that far in future"
        elif yy < 2000:
            yy = yy - 1900
            if yy < 50:
                raise ValueError, "can't handle data that far in past"
        self.val = "%02d%02d%02d%02d%02d%02dZ" % (yy, val[1], val[2],
                                                  val[3], val[4], val[5])
    def _parse(self):
        """Return the time as seconds since the epoch (UTC), caching it."""
        if self._val:
            return self._val
        yy = string.atoi(self.val[:2])
        mm1 = string.atoi(self.val[2:4])
        dd = string.atoi(self.val[4:6])
        hh = string.atoi(self.val[6:8])
        mm2 = string.atoi(self.val[8:10])
        ss = string.atoi(self.val[10:12])
        assert self.val[-1] == 'Z'
        # Two-digit year pivot: >= 50 -> 19YY, < 50 -> 20YY.
        if yy >= 50:
            yy = 1900 + yy
        else:
            yy = 2000 + yy
        self._val = time.mktime((yy, mm1, dd, hh, mm2, ss, -1, -1, -1)) \
                    - time.timezone
        return self._val
class Contextual(ASN1Object):
    """Wrapper for optional and choice encoded items (primarily)
    For contextual encoding, we can't necessarily tell what the type
    of the value is without looking at the ASN.1 type decl.  Of
    course, the whole purpose of this module is to avoid looking at
    the type decl -- so we can't win (directly).
    The solution is this thunk object.  When the decoded structure is
    actually used, it should be clear whether this is, say, an
    OPTIONAL integer type, some other tagged, known type, or an
    encoded CHOICE.  Call the decode method when the encoding includes
    the full DER encoding.  Call choose when the value doesn't have
    the appropriate tag/len info.
    """
    def __init__(self, tag, len, val):
        self.tag = tag          # context-specific tag number
        self.len = len          # content length in bytes
        self.val = val          # raw, still-encoded bytes
        self.unknown = 1        # true until decode()/choose() is called
        if self.val:
            self.atomic = 0
        else:
            self.atomic = 1
    def __repr__(self):
        if self.unknown:
            return '<contextual %d %d>' % (self.tag, self.len)
        elif self.val:
            return "[%d] {" % self.tag + repr(self.val) + "}"
        else:
            return "[%d]" % self.tag
    def decode(self):
        """Decode self.val, assuming it holds a complete DER encoding."""
        if self.unknown:
            self.val = parse(self.val)
            self.unknown = 0
        return self.val
    def choose(self, tag):
        """Decode self.val as a bare value of universal type *tag*."""
        if self.unknown:
            # NOTE(review): the module-level parse() returns the decoded
            # *value*, not an ASN1Parser instance, so setting p.id/p.length
            # and calling p._parse below looks broken -- verify before
            # relying on choose().
            p = parse(self.val)
            p.id = 0
            p.length = self.len
            self.val = p._parse(tag, self.len)
            self.unknown = 0
        return self.val
class Boolean(ASN1Object):
    """ASN.1 BOOLEAN value; truthiness follows the stored byte value."""
    def __nonzero__(self):
        # Python 2 truth-value hook.
        if self.val:
            return 1
        else:
            return 0
    def __repr__(self):
        if self.val:
            return 'TRUE'
        else:
            return 'FALSE'
    def _encode(self, io):
        # Encoded as tag 0x01, length 1, and the raw value byte.
        io.write(chr(BOOLEAN) + chr(1) + chr(self.val))
class OID(ASN1Object):
    """ASN.1 OBJECT IDENTIFIER, stored as a tuple of integer arcs."""
    def __init__(self, val):
        self.val = tuple(val)
    def __hash__(self):
        # Cache the hash; OIDs are used as dictionary keys (see parseCfg).
        if not hasattr(self, '_hash'):
            self._hash = reduce(operator.xor, self.val)
        return self._hash
    def __cmp__(self, other):
        return cmp(self.val, other.val)
    def __repr__(self):
        # Dotted decimal form, e.g. "1.2.840.113549".
        return string.join(map(str, self.val), '.')
    def _encode(self, io):
        """Write the DER encoding to *io*.

        The first two arcs are packed into a single octet (40*x + y);
        the remaining arcs are base-128 with continuation bits.
        """
        elts = self.val
        bytes = []
        bytes.append(40 * elts[0] + elts[1])
        for elt in elts[2:]:
            if elt < 0x7F:
                bytes.append(elt)
            else:
                # Base-128, most significant group first; every octet but
                # the last carries the continuation bit (0x80).
                temp = []
                div = rem = elt
                while div:
                    div, rem = divmod(div, 128)
                    temp.append(rem)
                temp.reverse()
                head = map(lambda x:x | 0x80, temp[:-1])
                bytes = bytes + head + temp[-1:]
        io.write(chr(OBJECT_IDENTIFIER) + unparseLength(len(bytes))
                 + string.join(map(chr, bytes), ''))
class ASN1Parser:
# Keeps some state around between method invocations, which
# simplifies programming
#
# This code can safely raise EOFError inside methods, which will
# be caught by parse and raise ValueError, "unexpected end of input"
def __init__(self, io):
self.io = io
# all these instance variables store information about the
# more recently read tag
self.tag = None
self.id = None
self.length = 0
self.indefinite = None
self.constructed = None
def getTag(self):
c = self.io.read(1)
if c == '':
raise EOFError
tag = ord(c)
self.id = tag & ~0x1F
self.tag = tag & 0x1F
if tag & 0x20:
self.constructed = 1
if self.tag == 0x1F:
# high-tag-number
tag = 0
while 1:
c = ord(io.read(1))
tag = (tag << 7) | (value & 0x7F)
if c & 0x80:
break
self.tag = tag
return self.tag
def getLength(self):
oct1 = ord(self.io.read(1))
if oct1 == 0x80:
self.length = 0
self.indefinite = 1
if oct1 & 0x80:
# lower bits indicate number of octets to represent length
l = convertOctetsToInt(self.io.read(oct1 & 0x7F))
self.length = l
else:
self.length = oct1 & 0x7F
return self.length
def getBody(self):
buf = self.io.read(self.length)
if len(buf) != self.length:
raise EOFError
return buf
def ord(self, char):
if len(char) == 0:
raise EOFError
return ord(char)
def parse(self):
try:
tag = self.getTag()
len = self.getLength()
except EOFError:
raise ValueError, "unexpected end of encoded data"
return self._parse(tag, len)
def _parse(self, tag, len):
if (self.id & 0xC0) == 0:
# class is universal
try:
method = self.__dispatch[tag]
except KeyError:
self.val = self.parseUnknown()
else:
self.val = method(self)
elif (self.id & 0xC0) == 0x80:
# class is context-specific
self.val = self.parseContextSpecific()
else:
raise ValueError, \
"invalid or unsupported tag: %s (id %s)" % (self.tag,
self.id
& 0xC0)
return self.val
def parseBoolean(self):
b = self.ord(self.getBody())
return Boolean(b)
def parseContextSpecific(self):
# If the encoded object is a CHOICE, then the encoding carries
# *no* information about the type of the encoded value. The
# best we can do as create a Choice object that can be told
# what the right value is. Fuck.
if self.length == 0 and not self.indefinite:
raise ValueError, "don't know how to handle CHOICE with indefinite length"
buf = self.getBody()
return Contextual(self.tag, self.length, buf)
def parseSet(self):
return Set(parse(self.getBody()))
def parseUnknown(self):
return self.getBody()
def parseInteger(self):
buf = self.getBody()
if len(buf) == 0:
raise EOFError
return getInteger(buf)
def parseZero(self):
# XXX why is this zero? what does it all mean?
if self.id & 0x80:
# this hack retrieves the version number from x509
return self.length
def parseSequence(self):
seq = Sequence()
base = self.io.tell()
newIo = StringIO(self.getBody())
try:
while 1:
obj = ASN1Parser(newIo).parse()
seq.append(obj)
except (EOFError, ValueError):
pass
return seq
def parseUTCTime(self):
return UTCTime(self.getBody())
def parseBitString(self):
# XXX this isn't right yet
unused = self.ord(self.io.read(1))
if unused != 0:
print "XXX", unused, "unused bits"
return self.io.read(self.length - 1)
def parsePrintableString(self):
return self.getBody()
def parseOctetString(self):
return self.getBody()
def parseSet(self):
contains = parse(self.getBody())
return Set(contains)
def parseObjectIdentifier(self):
buf = self.getBody()
try:
o1 = self.ord(buf[0])
except IndexError:
raise EOFError
x = o1 / 40
y = o1 % 40
if x > 2:
y = y + (x - 2) * 40
x = 2
oid = [x, y]
num = None
for oct in map(self.ord, buf[1:]):
if oct & 0x80:
if num:
num = (num << 7L) | (oct & 0x7F)
else:
num = long(oct & 0x7f)
else:
if num:
final = (num << 7L) | oct
# Is there a better way to do this?
# Should I just make it long all the time?
try:
oid.append(int(final))
except OverflowError:
oid.append(final)
num = None
else:
oid.append(oct)
return OID(oid)
def parseNull(self):
self.getBody()
return None
__dispatch = {SEQUENCE: parseSequence,
INTEGER: parseInteger,
SET: parseSet,
UTCTIME: parseUTCTime,
BIT_STRING: parseBitString,
OCTET_STRING: parseOctetString,
PrintableString: parsePrintableString,
SET: parseSet,
OBJECT_IDENTIFIER: parseObjectIdentifier,
NULL: parseNull,
BOOLEAN: parseBoolean,
0: parseZero,
}
def getInteger(buf):
    """Decode a big-endian ASN.1 INTEGER body into a Python number.

    NOTE(review): negative values are decoded sign-magnitude (top bit =
    sign), whereas DER specifies two's complement -- verify before relying
    on negative integers.  Matches unparseInteger's convention.
    """
    bytes = map(ord, buf)
    if bytes[0] & 0x80:
        sign = -1
    else:
        sign = 1
    value = long(bytes[0] & 0x7F)
    for byte in bytes[1:]:
        value = (value << 8) | byte
    if sign == 1:
        return value
    else:
        return -value
def unparseContextual(tag, enc, constructed=1):
    """DER-encode *enc* under a context-specific tag.

    Bug fix: per X.690 the constructed flag is bit 6 (0x20) of the
    identifier octet.  The original or'd in 0x40, which combined with the
    0x80 class bit produced a private-class identifier (0xC0 | tag)
    instead of a constructed context-specific one (0xA0 | tag).
    """
    return chr((constructed and 0x20) | 0x80 | tag) \
           + unparseLength(len(enc)) + enc
def unparseSequence(encObjs, constructed=1):
    """DER-encode a SEQUENCE from a list of already-encoded elements."""
    buf = string.join(encObjs, '')
    # py2 and/or idiom: the tag octet is SEQUENCE with the constructed bit
    # (0x20) or'd in when requested, plain SEQUENCE otherwise.
    return chr(constructed and 0x20 | SEQUENCE or SEQUENCE) \
           + unparseLength(len(buf)) + buf
def unparseNull():
    """Return the DER encoding of NULL: tag 0x05 followed by length 0."""
    return "\x05\x00"
def unparseSet(encObjs, constructed=1):
    """DER-encode a SET from a list of already-encoded elements."""
    # XXX actually, you need to sort the elements in the set before encoding
    buf = string.join(encObjs, '')
    # Same and/or idiom as unparseSequence: 0x20 is the constructed bit.
    return chr(constructed and 0x20 |SET or SET) \
           + unparseLength(len(buf)) + buf
def unparseBitString(str):
    """DER-encode a BIT STRING.

    Assumes the value is a whole number of octets: the leading unused-bits
    count octet is always written as 0.
    """
    unused = 0
    return chr(BIT_STRING) + unparseLength(len(str) + 1) + chr(unused) + str
def unparsePrintableString(str):
    """DER-encode *str* as a PrintableString (tag 0x13)."""
    # Cleanup: removed the dead local "unused = 0", copy-pasted from
    # unparseBitString where it is actually part of the encoding.
    return chr(PrintableString) + unparseLength(len(str)) + str
def unparseOctetString(str):
    """DER-encode *str* as an OCTET STRING (tag 0x04)."""
    # Cleanup: removed the dead local "unused = 0", copy-pasted from
    # unparseBitString where it is actually part of the encoding.
    return chr(OCTET_STRING) + unparseLength(len(str)) + str
def unparseInteger(num):
    """DER-encode an INTEGER.

    NOTE(review): negative numbers are written sign-magnitude (sign bit set
    on the most significant octet), not two's complement as DER requires --
    verify; getInteger uses the same non-standard convention.
    """
    if num < 0:
        sign = -1
        num = -num
    else:
        sign = 1
    if num == 0:
        bytes = [0]
    else:
        # Little-endian octets of the magnitude; reversed below.
        bytes = []
        div = num
        rem = 0
        while div:
            div, rem = divmod(div, 256)
            bytes.append(int(rem))
    last = bytes[-1]
    if last & 0x80:
        # Most significant octet would read as negative: prepend (after the
        # reverse below) a zero octet so the value stays positive.
        bytes.append(0)
    if sign == -1:
        bytes[-1] = bytes[-1] | 0x80
    bytes.reverse()
    return chr(INTEGER) + unparseLength(len(bytes)) \
           + string.join(map(chr, bytes), '')
def unparseLength(length):
    """DER-encode a length: short form up to 127, long form beyond."""
    if length <= 127:
        return chr(length)
    # Long form: first octet is 0x80 | number-of-length-octets, followed by
    # the length itself in big-endian order.
    bytes = []
    div = length
    while div:
        div, rem = divmod(div, 256)
        bytes.append(rem)
    bytes.reverse()
    return chr(0x80|len(bytes)) + string.join(map(chr, bytes), '')
def convertOctetsToInt(buf):
    """Interpret *buf* as a big-endian integer."""
    # XXX this really is a kludge
    l = len(buf)
    if l <= 4:
        # Zero-pad to 4 bytes and let struct do the work.
        # NOTE(review): ">l" is *signed*, so a 4-octet value with the top
        # bit set comes back negative -- verify against callers.
        return struct.unpack(">l", chr(0) * (4 - l) + buf)[0]
    else:
        val = 0L
        for byte in map(ord, buf):
            val = (val << 8) | byte
        return val
def parseCfg(io):
    """Parse dumpasn1 Object Identifier configuration file
    Returns a dictionary mapping OID objects to human-readable
    descriptions.
    The configuration file is available at the following URL:
    http://www.cs.auckland.ac.nz/~pgut001/dumpasn1.cfg
    (Last verified Apr 10, 2000.)
    """
    oids = {}
    oid = None
    while 1:
        line = io.readline()
        if line == '':
            break
        line = string.strip(line)
        # Skip blank lines and comments.
        if not line or line[0] == '#':
            continue
        try:
            name, val = map(string.strip, string.split(line, '=', 1))
        except ValueError:
            # Line without '=': treat the whole line as a bare name.
            name = line
            val = None
        if name == 'OID':
            # A new record starts; flush the previous one first.
            if oid:
                oids[oid] = dict
            # The value is a list of hex octets; assemble them into a raw
            # DER body and parse it into an OID key.
            bytes = string.join(map(chr,
                                    map(eval,
                                        map(lambda s:"0x"+s,
                                            string.split(val)))), '')
            oid = parse(bytes)
            key = oid
            # NOTE(review): "dict" shadows the builtin and "key" is never
            # read again -- works, but worth cleaning up.
            dict = {}
        else:
            # Any other "name = value" line belongs to the current record.
            dict[name] = val
    # Flush the final record.
    if oid:
        oids[oid] = dict
    return oids
def parse(buf):
    """Parse one DER-encoded value out of the byte string *buf*."""
    return ASN1Parser(StringIO(buf)).parse()
| {
"repo_name": "danieljohnlewis/pisces",
"path": "pisces/asn1.py",
"copies": "1",
"size": "20341",
"license": "mit",
"hash": -7129103285800786000,
"line_mean": 28.9572901325,
"line_max": 86,
"alpha_frac": 0.5218524163,
"autogenerated": false,
"ratio": 3.8900363358194685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9870936206097358,
"avg_score": 0.00819050920442189,
"num_lines": 679
} |
"""A parser for axfs file system images"""
from stat import *
import zlib
from . import *
from ..io import *
from ..util import *
# On-disk AXFS superblock layout (big-endian).
AxfsHeader = Struct('AxfsHeader', [
    ('magic', Struct.STR % 4),
    ('signature', Struct.STR % 16),
    ('digest', Struct.STR % 40),
    ('blockSize', Struct.INT32),
    ('files', Struct.INT64),
    ('size', Struct.INT64),
    ('blocks', Struct.INT64),
    ('mmapSize', Struct.INT64),
    ('regions', Struct.STR % 144),   # 18 packed 64-bit region offsets
    ('...', 13),
], Struct.BIG_ENDIAN)
# Expected magic and signature at the start of an AXFS image.
axfsHeaderMagic = b'\x48\xA0\xE4\xCD'
axfsHeaderSignature = b'Advanced XIP FS\0'
# Descriptor for one on-disk region (big-endian).
AxfsRegionDesc = Struct('AxfsRegionDesc', [
    ('offset', Struct.INT64),
    ('size', Struct.INT64),
    ('compressedSize', Struct.INT64),
    ('maxIndex', Struct.INT64),
    ('tableByteDepth', Struct.INT8),
    ('incore', Struct.INT8),
], Struct.BIG_ENDIAN)
# Names of the region descriptors, in the order their offsets appear in
# AxfsHeader.regions.  Entries from index 4 on are table regions.
axfsRegions = [
    'strings',
    'xip',
    'byteAligned',
    'compressed',
    # tableRegions:
    'nodeType',
    'nodeIndex',
    'cnodeOffset',
    'cnodeIndex',
    'banodeOffset',
    'cblockOffset',
    'fileSize',
    'nameOffset',
    'numEntries',
    'modeIndex',
    'arrayIndex',
    'modes',
    'uids',
    'gids',
]
def isAxfs(file):
    """Return a truthy value when *file* starts with an AXFS superblock."""
    hdr = AxfsHeader.unpack(file)
    return hdr and (hdr.magic, hdr.signature) == (axfsHeaderMagic, axfsHeaderSignature)
def readAxfs(file):
    """Generate UnixFile entries for every inode in an AXFS image.

    Walks the region/table structures of the image and recursively yields
    files and directories starting from the root inode (id 0).
    """
    header = AxfsHeader.unpack(file)
    if header.magic != axfsHeaderMagic or header.signature != axfsHeaderSignature:
        raise Exception('Wrong magic')
    # Map each region name to a file view, and materialize the table
    # regions (index >= 4) into lists of little-endian integers of
    # region.tableByteDepth bytes each.
    regions = {}
    tables = {}
    for i, k in enumerate(axfsRegions):
        region = AxfsRegionDesc.unpack(file, parse64be(header.regions[i*8:(i+1)*8]))
        regions[k] = FilePart(file, region.offset, region.size)
        if i >= 4:
            regionData = regions[k].read()
            tables[k] = [sum([ord(regionData[j*region.maxIndex+i:j*region.maxIndex+i+1]) << (8*j) for j in range(region.tableByteDepth)]) for i in range(region.maxIndex)]
    def readInode(id, path=''):
        # Yield the UnixFile for inode *id*, then recurse into children.
        size = tables['fileSize'][id]
        nameOffset = tables['nameOffset'][id]
        mode = tables['modes'][tables['modeIndex'][id]]
        uid = tables['uids'][tables['modeIndex'][id]]
        gid = tables['gids'][tables['modeIndex'][id]]
        numEntries = tables['numEntries'][id]
        arrayIndex = tables['arrayIndex'][id]
        # Read the NUL-terminated name out of the strings region.
        name = b''
        regions['strings'].seek(nameOffset)
        while b'\0' not in name:
            name += regions['strings'].read(1024)
        name = name.partition(b'\0')[0].decode('ascii')
        path += name if id != 0 else ''
        isDir = S_ISDIR(mode)
        def generateChunks(arrayIndex=arrayIndex, numEntries=numEntries, size=size):
            # Lazily produce the file contents, one node at a time.
            # Node types: 0 = XIP (uncompressed 4k page), 1 = zlib
            # compressed block, 2 = byte-aligned (uncompressed) data.
            read = 0
            for i in range(numEntries):
                nodeType = tables['nodeType'][arrayIndex + i]
                nodeIndex = tables['nodeIndex'][arrayIndex + i]
                if nodeType == 0:
                    regions['xip'].seek(nodeIndex << 12)
                    contents = regions['xip'].read(4096)
                elif nodeType == 1:
                    cnodeIndex = tables['cnodeIndex'][nodeIndex]
                    regions['compressed'].seek(tables['cblockOffset'][cnodeIndex])
                    contents = zlib.decompress(regions['compressed'].read(tables['cblockOffset'][cnodeIndex+1] - tables['cblockOffset'][cnodeIndex]))
                elif nodeType == 2:
                    regions['byteAligned'].seek(tables['banodeOffset'][nodeIndex])
                    contents = regions['byteAligned'].read(size - read)
                else:
                    raise Exception('Unknown type')
                yield contents
                read += len(contents)
        yield UnixFile(
            path = path,
            size = size if not isDir else 0,
            mtime = 0,
            mode = mode,
            uid = uid,
            gid = gid,
            contents = ChunkedFile(generateChunks, size) if S_ISREG(mode) or S_ISLNK(mode) else None,
        )
        if isDir:
            # Directory children occupy consecutive inode ids.
            for i in range(numEntries):
                for f in readInode(arrayIndex + i, path + '/'):
                    yield f
    for f in readInode(0):
        yield f
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/archive/axfs.py",
"copies": "1",
"size": "3572",
"license": "mit",
"hash": -7714316123491403000,
"line_mean": 26.6899224806,
"line_max": 161,
"alpha_frac": 0.6637737962,
"autogenerated": false,
"ratio": 3.114210985178727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9182979919395192,
"avg_score": 0.019000972396706974,
"num_lines": 129
} |
"""A parser for Backup.bin, the settings file used on Sony cameras"""
# see /usr/kmod/backup.ko
from collections import namedtuple
from ..util import *
# File header of Backup.bin (little-endian by Struct default).
BackupHeader = Struct('BackupHeader', [
    ('magic', Struct.INT32),
    ('cookie', Struct.INT32),
    ('writeComp', Struct.INT32),
    ('version', Struct.STR % 4),     # b'BK' + ascii version digit(s)
    ('numSubsystems', Struct.INT32),
])
# Accepted magic values for the header.
backupHeaderMagic = [0x82ec0000, 0x832c0000]
# One entry per subsystem; ptr indexes into the property table.
SubsystemTableEntry = Struct('SubsystemTableEntry', [
    ('numProperties', Struct.INT16),
    ('ptr', Struct.INT32),
])
# Property table entry layouts; the attr field widened in format v4.
PropertyTableEntryV1 = Struct('PropertyTableEntryV1', [
    ('attr', Struct.INT8),
    ('ptr', Struct.INT32),
])
PropertyTableEntryV4 = Struct('PropertyTableEntryV4', [
    ('attr', Struct.INT16),
    ('ptr', Struct.INT32),
])
# Size prefixes used when the packed size field is 0xff (oversize)
# or 0 (variable size) -- see readBackup.
OversizeProperty = Struct('OversizeProperty', [
    ('size', Struct.INT16),
])
VariableSizeProperty = Struct('VariableSizeProperty', [
    ('size', Struct.INT16),
    ('maxSize', Struct.INT16),
])
# Decoded property record yielded by readBackup.
BackupProperty = namedtuple('BackupProperty', 'id, attr, data, resetData')
def readBackup(file):
    """Generate BackupProperty records from a Backup.bin settings file.

    Properties are identified by (subsystem << 16 | index).  Unassigned
    slots (ptr == 0xffffffff) are skipped.
    """
    header = BackupHeader.unpack(file)
    if header.magic not in backupHeaderMagic:
        raise Exception('Wrong magic')
    if header.version[:2] != b'BK':
        raise Exception('Wrong version number')
    version = int(header.version[2:3])
    # Layout differences between format versions.
    headerLength = 0x100 if version >= 2 else 0x20
    PropertyTableEntry = PropertyTableEntryV4 if version >= 4 else PropertyTableEntryV1
    subsystemTableOffset = headerLength
    propertyTableOffset = subsystemTableOffset + header.numSubsystems * SubsystemTableEntry.size
    for i in range(header.numSubsystems):
        subsystem = SubsystemTableEntry.unpack(file, subsystemTableOffset + i * SubsystemTableEntry.size)
        for j in range(subsystem.numProperties):
            id = i << 16 | j
            property = PropertyTableEntry.unpack(file, propertyTableOffset + (subsystem.ptr + j) * PropertyTableEntry.size)
            if property.ptr == 0xffffffff:
                continue
            attr = property.attr
            # ptr packs an 8-bit size and a 24-bit data offset.
            size = property.ptr >> 24
            offset = property.ptr & 0xffffff
            maxSize = size
            if size == 0xff:
                # Oversize property: real size stored in a prefix struct.
                op = OversizeProperty.unpack(file, offset)
                size = op.size
                maxSize = op.size
                offset += OversizeProperty.size
            elif size == 0:
                # Variable-size property: current and maximum sizes stored
                # in a prefix struct.
                vp = VariableSizeProperty.unpack(file, offset)
                size = vp.size
                maxSize = vp.maxSize
                offset += VariableSizeProperty.size
            file.seek(offset)
            data = file.read(size)
            resetData = None
            if attr & 0x01:  # property is read only, cannot be written with Backup_write()
                pass
            if attr & 0x02:  # property is protected, won't be changed by Backup_protect()
                pass
            if attr & 0x08:  # callbacks are triggered when this property is written with Backup_write()
                pass
            if attr & 0x74:  # property can be reset with Backup_reset()
                # The reset (default) value is stored right after the
                # current value, maxSize bytes in.
                file.seek(offset + maxSize)
                resetData = file.read(size)
            if attr & 0x80:  # property data is an array that can be read with Backup_read_setting_attr()
                # there are ord(backupProperties[0x3e000c].data)+1 elements in the array
                pass
            yield BackupProperty(id, attr, data, resetData)
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/sony/backup.py",
"copies": "1",
"size": "2986",
"license": "mit",
"hash": 216220243290607970,
"line_mean": 28.5643564356,
"line_max": 114,
"alpha_frac": 0.7123241795,
"autogenerated": false,
"ratio": 3.48018648018648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.469251065968648,
"avg_score": null,
"num_lines": null
} |
"""A parser for cramfs file system images"""
from collections import namedtuple, OrderedDict
import io
import os
import posixpath
from stat import *
import zlib
from . import *
from ..io import *
from .. import lz77
from ..util import *
# On-disk cramfs superblock.
CramfsSuper = Struct('CramfsSuper', [
    ('magic', Struct.STR % 4),
    ('size', Struct.INT32),
    ('flags', Struct.INT32),
    ('future', Struct.INT32),
    ('signature', Struct.STR % 16),
    ('crc', Struct.INT32),           # CRC32 computed with this field zeroed
    ('edition', Struct.INT32),
    ('blocks', Struct.INT32),
    ('files', Struct.INT32),
    ('name', Struct.STR % 16),
])
# Fixed data block size and expected superblock magic/signature.
cramfsBlockSize = 4096
cramfsSuperMagic = b'\x45\x3d\xcd\x28'
cramfsSuperSignature = b'Compressed ROMFS'
# On-disk inode; size/gid and nameLen/offset are bit-packed (see readCramfs).
CramfsInode = Struct('CramfsInode', [
    ('mode', Struct.INT16),
    ('uid', Struct.INT16),
    ('size_gid', Struct.INT32),      # low 24 bits size, high 8 bits gid
    ('nameLen_offset', Struct.INT32),# low 6 bits nameLen/4, rest offset/4
])
def isCramfs(file):
    """Return a truthy value when *file* starts with a cramfs superblock."""
    sb = CramfsSuper.unpack(file)
    return sb and (sb.magic, sb.signature) == (cramfsSuperMagic, cramfsSuperSignature)
def readCramfs(file):
    """Generate UnixFile entries for every inode in a cramfs image."""
    super = CramfsSuper.unpack(file)
    # Pick the block decompressor from the superblock flags.
    if super.flags & 0x10000000:
        raise Exception('LZO compression not supported')
    elif super.flags & 0x20000000:
        decompress = lambda data: lz77.inflateLz77(io.BytesIO(data))
    else:
        decompress = zlib.decompress
    if super.magic != cramfsSuperMagic or super.signature != cramfsSuperSignature:
        raise Exception('Wrong magic')
    # The stored CRC is computed with its own field (bytes 32..35) zeroed.
    if crc32(FilePart(file, 0, 32), io.BytesIO(4 * b'\0'), FilePart(file, 36)) != super.crc:
        raise Exception('Wrong checksum')
    def readInode(path=''):
        # Parse the inode at the current file position and yield its
        # UnixFile; for directories, recurse over the child inodes.
        off = file.tell()
        inode = CramfsInode.unpack(file, off)
        # Unpack the bit-packed fields (all offsets/lengths in 4-byte units).
        size = inode.size_gid & 0xffffff
        gid = inode.size_gid >> 24
        nameLen = (inode.nameLen_offset & 0x3f) * 4
        offset = (inode.nameLen_offset >> 6) * 4
        file.seek(off + CramfsInode.size)
        name = file.read(nameLen).rstrip(b'\0').decode('ascii')
        path += name
        isDir = S_ISDIR(inode.mode)
        def generateChunks(offset=offset, size=size):
            # Contents are stored as a table of end-of-block pointers
            # followed by the compressed blocks themselves.
            nBlocks = (size - 1) // cramfsBlockSize + 1
            file.seek(offset)
            blockPointers = [offset + nBlocks * 4] + [parse32le(file.read(4)) for i in range(nBlocks)]
            for i in range(len(blockPointers) - 1):
                file.seek(blockPointers[i])
                block = file.read(blockPointers[i+1] - blockPointers[i])
                yield decompress(block)
        yield UnixFile(
            path = path,
            size = size if not isDir else 0,
            mtime = 0,
            mode = inode.mode,
            uid = inode.uid,
            gid = gid,
            contents = ChunkedFile(generateChunks, size) if S_ISREG(inode.mode) or S_ISLNK(inode.mode) else None,
        )
        if isDir:
            # A directory's contents are the child inodes, stored
            # back to back starting at offset.
            file.seek(offset)
            while file.tell() < offset + size:
                for f in readInode(path + '/'):
                    yield f
            # Restore the position for the caller's sequential scan.
            file.seek(off + CramfsInode.size + nameLen)
    file.seek(CramfsSuper.size)
    for f in readInode():
        yield f
def _pad(file, n, char=b'\0'):
off = file.tell()
if off % n > 0:
file.write(char * (n - off % n))
def writeCramfs(files, outFile):
    """Write the UnixFile iterable *files* to *outFile* as a cramfs image.

    Two passes: first all inode headers + names are laid out breadth-first,
    then data blocks are appended and each inode is back-patched with its
    final offset/size.  The superblock and its CRC are written last.
    """
    files = {f.path: f for f in files}
    # Build parent -> children mapping, creating implicit parent dirs.
    tree = {'': set()}
    for path in files:
        while path != '':
            parent = posixpath.dirname(path).rstrip('/')
            tree.setdefault(parent, set()).add(path)
            path = parent
    # Reserve space for the superblock; it is filled in at the end.
    outFile.seek(0)
    outFile.write(b'\0' * CramfsSuper.size)
    stack = OrderedDict()
    StackItem = namedtuple('StackItem', 'inodeOffset, inodeSize, file, childrenPaths')
    # Breadth-first layout of inode headers and names, so that a
    # directory's children end up contiguous.
    tail = ['']
    while tail:
        file = files.get(tail[0], UnixFile(tail[0], 0, 0, S_IFDIR | 0o775, 0, 0, None))
        childrenPaths = sorted(tree.get(file.path, set()))
        offset = outFile.tell()
        outFile.write(b'\0' * CramfsInode.size)
        outFile.write(posixpath.basename(file.path).encode('ascii'))
        _pad(outFile, 4)
        stack[file.path] = StackItem(offset, outFile.tell() - offset, file, childrenPaths)
        tail = tail[1:] + childrenPaths
    # Second pass: emit data blocks and back-patch every inode.
    blocks = 0
    for item in stack.values():
        if S_ISDIR(item.file.mode):
            # A directory's extent covers its (contiguous) children inodes.
            if item.childrenPaths:
                offset = stack[item.childrenPaths[0]].inodeOffset
                size = stack[item.childrenPaths[-1]].inodeOffset + stack[item.childrenPaths[-1]].inodeSize - offset
            else:
                offset = 0
                size = 0
        elif S_ISREG(item.file.mode) or S_ISLNK(item.file.mode):
            # Block-pointer table first (patched as blocks are written),
            # then the zlib-compressed blocks.
            offset = outFile.tell()
            item.file.contents.seek(0, os.SEEK_END)
            size = item.file.contents.tell()
            nBlocks = (size - 1) // cramfsBlockSize + 1
            blocks += nBlocks
            outFile.write(b'\0' * (nBlocks * 4))
            item.file.contents.seek(0)
            for i in range(nBlocks):
                outFile.write(zlib.compress(item.file.contents.read(cramfsBlockSize), 9))
                o = outFile.tell()
                outFile.seek(offset + i * 4)
                outFile.write(dump32le(o))
                outFile.seek(o)
            _pad(outFile, 4)
        else:
            # Device/special nodes store their size field only.
            offset = 0
            size = item.file.size
        # Back-patch this inode with the final packed fields.
        o = outFile.tell()
        outFile.seek(item.inodeOffset)
        outFile.write(CramfsInode.pack(
            mode = item.file.mode,
            uid = item.file.uid,
            size_gid = item.file.gid << 24 | size,
            nameLen_offset = (offset // 4) << 6 | (item.inodeSize - CramfsInode.size) // 4
        ))
        outFile.seek(o)
    _pad(outFile, cramfsBlockSize)
    size = outFile.tell()
    # Write the superblock (crc still zero), then compute and patch the CRC.
    outFile.seek(0)
    outFile.write(CramfsSuper.pack(
        magic = cramfsSuperMagic,
        size = size,
        flags = 3,
        future = 0,
        signature = cramfsSuperSignature,
        crc = 0,
        edition = 0,
        blocks = blocks,
        files = len(stack),
        name = b'Compressed',
    ))
    outFile.seek(0)
    crc = crc32(outFile)
    outFile.seek(32)
    outFile.write(dump32le(crc))
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/archive/cramfs.py",
"copies": "1",
"size": "5212",
"license": "mit",
"hash": -3229518717769806000,
"line_mean": 25.7282051282,
"line_max": 104,
"alpha_frac": 0.6686492709,
"autogenerated": false,
"ratio": 2.942970073404856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4111619344304856,
"avg_score": null,
"num_lines": null
} |
"""A parser for ext2 file system images"""
from stat import *
from . import *
from ..io import *
from ..util import *
# Ext2 superblock (subset of fields; preceded by the 1024-byte boot record).
Ext2Header = Struct('Ext2Header', [
    ('bootRecord', 1024),
    ('inodesCount', Struct.INT32),
    ('blocksCount', Struct.INT32),
    ('...', 16),
    ('blockSize', Struct.INT32),     # log2(blockSize) - 10
    ('...', 4),
    ('blocksPerGroup', Struct.INT32),
    ('...', 4),
    ('inodesPerGroup', Struct.INT32),
    ('...', 12),
    ('magic', Struct.STR % 2),
    ('...', 966),
])
ext2HeaderMagic = b'\x53\xef'
# Block group descriptor (only the inode table block is used here).
Ext2Bgd = Struct('Ext2BlockGroupDescriptor', [
    ('...', 8),
    ('inodeTableBlock', Struct.INT32),
    ('...', 20),
])
# On-disk inode; "blocks" holds 12 direct + 3 indirect block pointers.
Ext2Inode = Struct('Ext2Inode', [
    ('mode', Struct.INT16),
    ('uid', Struct.INT16),
    ('size', Struct.INT32),
    ('atime', Struct.INT32),
    ('ctime', Struct.INT32),
    ('mtime', Struct.INT32),
    ('dtime', Struct.INT32),
    ('gid', Struct.INT16),
    ('...', 14),
    ('blocks', Struct.STR % 60),
    ('...', 28),
])
# Directory entry header, followed by nameSize bytes of name.
Ext2DirEntry = Struct('Ext2DirEntry', [
    ('inode', Struct.INT32),
    ('size', Struct.INT16),
    ('nameSize', Struct.INT8),
    ('fileType', Struct.INT8),
])
def isExt2(file):
    """Return a truthy value when *file* carries the ext2 magic number."""
    hdr = Ext2Header.unpack(file)
    return hdr and hdr.magic == ext2HeaderMagic
def readExt2(file):
    """Generate UnixFile entries for every inode in an ext2 image,
    starting from the root directory (inode 2)."""
    header = Ext2Header.unpack(file)
    if header.magic != ext2HeaderMagic:
        raise Exception('Wrong magic')
    blockSize = 1024 << header.blockSize
    # Block group descriptors start at the first block after the superblock.
    bdgOffset = max(blockSize, 2048)
    numBlockGroups = (header.blocksCount-1) // header.blocksPerGroup + 1
    inodeTables = [Ext2Bgd.unpack(file, bdgOffset + i * Ext2Bgd.size).inodeTableBlock for i in range(numBlockGroups)]
    def readInode(i, path = ''):
        # Locate inode i via its block group's inode table (inodes are
        # numbered from 1).
        inode = Ext2Inode.unpack(file, inodeTables[(i-1) // header.inodesPerGroup] * blockSize + ((i-1) % header.inodesPerGroup) * Ext2Inode.size)
        def generateChunks(contents=inode.blocks, size=inode.size, mode=inode.mode):
            if S_ISLNK(mode) and size <= len(contents):
                # Fast symlinks: the target is stored inline in the
                # block-pointer area.
                yield contents[:size]
                return
            # Expand triple/double/single indirect pointers (slots 14..12)
            # into one flat pointer list.
            ptrs = []
            for i in range(15, 11, -1):
                # resolve indirect pointers
                contents = contents[:i*4]
                for ptr in ptrs[i:]:
                    if ptr != 0:
                        file.seek(ptr * blockSize)
                        contents += file.read(blockSize)
                ptrs = [parse32le(contents[j:j+4]) for j in range(0, len(contents), 4)]
            read = 0
            for ptr in ptrs:
                if read < size:
                    # A zero pointer denotes a sparse (all-zero) block.
                    if ptr == 0:
                        block = b'\0' * blockSize
                    else:
                        file.seek(ptr * blockSize)
                        block = file.read(blockSize)
                    yield block[:size-read]
                    read += len(block)
        isDir = S_ISDIR(inode.mode)
        contents = ChunkedFile(generateChunks, inode.size)
        yield UnixFile(
            path = path,
            size = inode.size if not isDir else 0,
            mtime = inode.mtime,
            mode = inode.mode,
            uid = inode.uid,
            gid = inode.gid,
            contents = contents if S_ISREG(inode.mode) or S_ISLNK(inode.mode) else None,
        )
        if isDir:
            # Walk the directory entries and recurse into each child.
            while contents.tell() < inode.size:
                entry = Ext2DirEntry.unpack(contents.read(Ext2DirEntry.size))
                name = contents.read(entry.nameSize).decode('ascii')
                if name != '.' and name != '..':
                    for f in readInode(entry.inode, path + '/' + name):
                        yield f
                # Skip the padding up to the entry's declared record size.
                contents.read(entry.size - Ext2DirEntry.size - entry.nameSize)
    for f in readInode(2):
        yield f
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/archive/ext2.py",
"copies": "1",
"size": "3131",
"license": "mit",
"hash": -8448278426219147000,
"line_mean": 24.8760330579,
"line_max": 140,
"alpha_frac": 0.6349409134,
"autogenerated": false,
"ratio": 3.0398058252427185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41747467386427184,
"avg_score": null,
"num_lines": null
} |
"""A parser for FAT file system images"""
import io
import posixpath
import shutil
from stat import *
import time
from . import *
from ..io import *
from ..util import *
# FAT boot sector / BPB (FAT12/FAT16 layout with the extended boot record).
FatHeader = Struct('FatHeader', [
    ('jump', Struct.STR % 3),
    ('oemName', Struct.STR % 8),
    ('bytesPerSector', Struct.INT16),
    ('sectorsPerCluster', Struct.INT8),
    ('reservedSectors', Struct.INT16),
    ('fatCopies', Struct.INT8),
    ('rootEntries', Struct.INT16),
    ('...', 2),
    ('mediaDescriptor', Struct.INT8),
    ('sectorsPerFat', Struct.INT16),
    ('...', 8),
    ('sectors', Struct.INT32),
    ('...', 2),
    ('extendedSignature', Struct.STR % 1),
    ('serialNumber', Struct.INT32),
    ('volumeLabel', Struct.STR % 11),
    ('fsType', Struct.STR % 8),
    ('...', 448),
    ('signature', Struct.STR % 2),
])
# Boot sector signature (0x55AA) and extended boot record marker (0x29).
fatHeaderSignature = b'\x55\xaa'
fatHeaderExtendedSignature = b'\x29'
# Classic 8.3 directory entry.
FatDirEntry = Struct('FatDirEntry', [
    ('name', Struct.STR % 8),
    ('ext', Struct.STR % 3),
    ('attr', Struct.INT8),
    ('...', 1),
    ('ctimeCs', Struct.INT8),
    ('...', 8),
    ('time', Struct.INT16),
    ('date', Struct.INT16),
    ('cluster', Struct.INT16),
    ('size', Struct.INT32),
])
# VFAT long-file-name entry (attr == 0x0f); name fragments are UTF-16.
VfatDirEntry = Struct('VfatDirEntry', [
    ('sequence', Struct.INT8),
    ('name1', Struct.STR % 10),
    ('attr', Struct.INT8),
    ('...', 1),
    ('checksum', Struct.INT8),
    ('name2', Struct.STR % 12),
    ('...', 2),
    ('name3', Struct.STR % 4),
])
def isFat(file):
    """Return a truthy value when *file* looks like a FAT boot sector."""
    hdr = FatHeader.unpack(file)
    if not hdr:
        return hdr
    return (hdr.signature, hdr.extendedSignature) == (fatHeaderSignature, fatHeaderExtendedSignature) \
        and hdr.fsType.startswith(b'FAT')
def readFat(file):
    """Generate UnixFile entries for every file in a FAT12/FAT16 image."""
    header = FatHeader.unpack(file)
    if header.signature != fatHeaderSignature or header.extendedSignature != fatHeaderExtendedSignature:
        raise Exception('Wrong magic')
    # Layout: reserved sectors, FAT copies, root directory, data area.
    fatOffset = header.reservedSectors * header.bytesPerSector
    rootOffset = fatOffset + header.fatCopies * header.sectorsPerFat * header.bytesPerSector
    dataOffset = rootOffset + ((header.rootEntries * FatDirEntry.size - 1) // header.bytesPerSector + 1) * header.bytesPerSector
    # Load the whole FAT into a cluster-chain lookup list.
    file.seek(fatOffset)
    if header.fsType == b'FAT12 ':
        endMarker = 0xfff
        # FAT12 packs two 12-bit entries into every 3 bytes.
        packedClusters = [parse32le(file.read(3) + b'\0') for i in range(0, header.sectorsPerFat * header.bytesPerSector, 3)]
        clusters = [cluster for packed in packedClusters for cluster in [packed & 0xfff, (packed >> 12) & 0xfff]]
    elif header.fsType == b'FAT16 ':
        endMarker = 0xffff
        clusters = [parse16le(file.read(2)) for i in range(0, header.sectorsPerFat * header.bytesPerSector, 2)]
    else:
        raise Exception('Unknown FAT width')
    def readDir(entries, path=''):
        # Walk the raw directory bytes, collecting VFAT long-name
        # fragments until the matching 8.3 entry arrives.
        offset = 0
        vfatName = b''
        while entries[offset:offset+1] != b'\0':
            entry = FatDirEntry.unpack(entries, offset)
            # 0xe5 marks a deleted entry.
            if entry.name[0:1] != b'\xe5':
                if entry.attr == 0x0f:
                    # VFAT long-name entry: fragments are stored in reverse
                    # order, so prepend.
                    vfatEntry = VfatDirEntry.unpack(entries, offset)
                    vfatName = vfatEntry.name1 + vfatEntry.name2 + vfatEntry.name3 + vfatName
                else:
                    if vfatName != b'':
                        name = vfatName.decode('utf16').rstrip(u'\0\uffff')
                        vfatName = b''
                    else:
                        # Plain 8.3 name; 0x05 escapes a leading 0xe5 byte.
                        name = entry.name.decode('ascii').rstrip(' ')
                        if name[0] == '\x05':
                            name = '\xe5' + name[1:]
                        ext = entry.ext.decode('ascii').rstrip(' ')
                        if ext != '':
                            name += '.' + ext
                    if name != '.' and name != '..':
                        isLink = (entry.attr & 0x04) and (entry.ctimeCs & 0xe1) == 0x21
                        isDir = entry.attr & 0x10
                        def generateChunks(cluster=entry.cluster, size=entry.size, isDir=isDir):
                            # Follow the cluster chain; directories have no
                            # recorded size, so they read until end-of-chain.
                            read = 0
                            while cluster != 0 and cluster != endMarker and (read < size or isDir):
                                file.seek(dataOffset + (cluster - 2) * header.sectorsPerCluster * header.bytesPerSector)
                                block = file.read(header.sectorsPerCluster * header.bytesPerSector)
                                yield block if isDir else block[:size-read]
                                read += len(block)
                                cluster = clusters[cluster]
                        contents = ChunkedFile(generateChunks, entry.size if not isDir else -1)
                        yield UnixFile(
                            path = path + '/' + name,
                            size = entry.size,
                            mtime = time.mktime((1980 + (entry.date >> 9), (entry.date >> 5) & 0xf, entry.date & 0x1f, entry.time >> 11, (entry.time >> 5) & 0x3f, (entry.time & 0x1f) * 2, -1, -1, -1)),
                            mode = S_IFDIR if isDir else S_IFLNK if isLink else S_IFREG,
                            uid = 0,
                            gid = 0,
                            contents = contents if not isDir else None,
                        )
                        if isDir:
                            for f in readDir(contents.read(), path + '/' + name):
                                yield f
            offset += FatDirEntry.size
    file.seek(rootOffset)
    for f in readDir(file.read(dataOffset - rootOffset)):
        yield f
def writeFat(files, size, outFile):
 """Write *files* (iterable of UnixFile) into a FAT12 image of *size* bytes on *outFile*.

 Builds the directory tree, serializes file data cluster by cluster,
 emits VFAT long-name entries alongside 8.3 short names, then writes
 the FAT table and patches the '.'/'..' entries of subdirectories.
 """
 files = {f.path: f for f in files}
 # Parent -> children path mapping; '' is the root directory.
 tree = {'': set()}
 for path in files:
  while path != '':
   parent = posixpath.dirname(path).rstrip('/')
   tree.setdefault(parent, set()).add(path)
   path = parent
 sectorSize = 0x200
 clusterSize = 0x4000
 sectors = size // sectorSize
 # FAT12 stores two 12-bit entries per 3 bytes.
 fatSize = (size // clusterSize + 1) // 2 * 3
 fatSectors = (fatSize + sectorSize - 1) // sectorSize
 outFile.write(FatHeader.pack(
  jump = b'\xeb\0\x90',
  oemName = 8*b'\0',
  bytesPerSector = sectorSize,
  sectorsPerCluster = clusterSize // sectorSize,
  reservedSectors = 1,
  fatCopies = 1,
  rootEntries = clusterSize // FatDirEntry.size,
  sectors = sectors,
  mediaDescriptor = 0xf8,
  sectorsPerFat = fatSectors,
  extendedSignature = fatHeaderExtendedSignature,
  serialNumber = 0,
  volumeLabel = 11*b' ',
  fsType = b'FAT12 ',
  signature = fatHeaderSignature,
 ))
 # Zero-fill the remainder of the image so later seeks can write anywhere.
 for i in range(sectors - 1):
  outFile.write(sectorSize * b'\0')
 fatOffset = sectorSize
 rootOffset = fatOffset + fatSectors * sectorSize
 dataOffset = rootOffset + clusterSize
 # Entries 0 and 1 are reserved; the list grows as data clusters are used.
 clusters = [0xff8, 0xfff]
 def writeData(f):
  # Copy f into the next free clusters; returns (first cluster, byte size).
  f.seek(0)
  outFile.seek(dataOffset + (len(clusters) - 2) * clusterSize)
  shutil.copyfileobj(f, outFile)
  nc = (f.tell() + clusterSize - 1) // clusterSize
  for i in range(nc):
   clusters.append(len(clusters) + 1 if i < nc-1 else 0xfff)
  return (len(clusters)-nc if nc else 0), f.tell()
 def dirEntries(pc, c):
  # '.' and '..' entries for a directory in cluster c with parent cluster pc.
  return FatDirEntry.pack(
   name = b'. ',
   ext = b' ',
   attr = 0x10,
   ctimeCs = 0,
   time = 0,
   date = 0,
   cluster = c,
   size = 0,
  ) + FatDirEntry.pack(
   name = b'.. ',
   ext = b' ',
   attr = 0x10,
   ctimeCs = 0,
   time = 0,
   date = 0,
   cluster = pc,
   size = 0,
  )
 # path -> first cluster of each written subdirectory (for later patching).
 dirs = {}
 def writeDir(path):
  # Serialize the directory at *path*; children's data is written first
  # (depth-first) so their cluster numbers are known for the entries.
  data = io.BytesIO()
  if path != '':
   # Placeholder '.'/'..' entries, patched after all clusters are known.
   data.write(dirEntries(0, 0))
  for p in tree.get(path, set()):
   file = files.get(p, UnixFile(p, 0, 0, S_IFDIR | 0o775, 0, 0, None))
   c, s = writeData(file.contents if not S_ISDIR(file.mode) else writeDir(file.path))
   if S_ISDIR(file.mode):
    dirs[file.path] = c
   # Derive the 8.3 short name (padded, upper-cased, ascii).
   name, ext = (posixpath.basename(file.path).upper() + '.').split('.', 1)
   name = name[:8].ljust(8, ' ').encode('ascii')
   ext = ext[:3].ljust(3, ' ').encode('ascii')
   # VFAT checksum over the short name (rotate-right-and-add).
   # NOTE(review): 'sum' and 'chr' shadow builtins here -- harmless in
   # this scope, but candidates for renaming.
   sum = 0
   for chr in (name + ext):
    sum = (((sum & 1) << 7) + (sum >> 1) + chr) & 0xff
   # VFAT long-name entries: 13 UTF-16 chars each, stored last-first.
   fn = posixpath.basename(file.path) + '\0'
   vfatEntries = [fn[o:o+13] for o in range(0, len(fn), 13)]
   for i, n in list(enumerate(vfatEntries))[::-1]:
    n = n.encode('utf-16le').ljust(26, b'\xff')
    data.write(VfatDirEntry.pack(
     sequence = i + 1 + (0x40 if i == len(vfatEntries)-1 else 0),
     name1 = n[:10],
     attr = 0x0f,
     checksum = sum,
     name2 = n[10:22],
     name3 = n[22:],
    ))
   t = time.localtime(file.mtime)
   data.write(FatDirEntry.pack(
    name = name,
    ext = ext,
    attr = 0x10 if S_ISDIR(file.mode) else 0x04 if S_ISLNK(file.mode) else 0,
    ctimeCs = 0x21 if S_ISLNK(file.mode) else 0,
    time = (t.tm_hour << 11) + (t.tm_min << 5) + t.tm_sec // 2,
    date = (max(t.tm_year - 1980, 0) << 9) + (t.tm_mon << 5) + t.tm_mday,
    cluster = c,
    size = s if not S_ISDIR(file.mode) else 0,
   ))
  return data
 root = writeDir('')
 root.seek(0)
 outFile.seek(rootOffset)
 shutil.copyfileobj(root, outFile)
 # Second pass: patch '.'/'..' of every subdirectory now that all
 # cluster numbers are assigned.
 for p, c in dirs.items():
  parent = posixpath.split(p)[0]
  outFile.seek(dataOffset + (c - 2) * clusterSize)
  outFile.write(dirEntries(dirs[parent] if parent != '/' else 0, c))
 outFile.seek(fatOffset)
 # Emit the FAT12 table, packing two 12-bit entries into 3 bytes.
 for i in range(0, len(clusters), 2):
  outFile.write(dump32le(clusters[i] + ((clusters[i+1] << 12) if i+1 < len(clusters) else 0))[:3])
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/archive/fat.py",
"copies": "1",
"size": "8219",
"license": "mit",
"hash": 8106126916554502000,
"line_mean": 29.4407407407,
"line_max": 180,
"alpha_frac": 0.6220951454,
"autogenerated": false,
"ratio": 2.974665218964893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4096760364364893,
"avg_score": null,
"num_lines": null
} |
"""A parser for HCL2 implemented using the Lark parser"""
import os
from os.path import exists, dirname
from lark import Lark
from lark.grammar import Rule
from lark.lexer import TerminalDef
from hcl2.transformer import DictTransformer
PARSER_FILE = os.path.join(dirname(__file__), 'lark_parser.py')
PARSER_FILE_TEMPLATE = """
from lark import Lark
DATA = (%s)
MEMO = (%s)
def Lark_StandAlone(**kwargs):
return Lark._load_from_dict(DATA, MEMO, **kwargs)
"""
def create_parser_file():
    """Generate the cached standalone parser module at PARSER_FILE.

    Parsing the Lark grammar takes about 0.5 seconds. In order to improve
    performance we cache the parser file: the entire python module generated
    by Lark's standalone parser feature is written to disk.
    See: https://github.com/lark-parser/lark/blob/master/lark/tools/standalone.py
    Lark also supports serializing the parser config but the deserialize
    function did not work for me. The lark state contains dicts with numbers
    as keys which is not supported by json so the serialized state can't be
    written to a json file. Exporting to other file types would have required
    adding additional dependencies or writing a lot more code. Lark's
    standalone parser feature works great but it expects to be run as a
    separate shell command; the code below copies some of the standalone
    parser generator code in a way that we can use.
    """
    # Keep the path and the file handle in separate names (the original
    # rebound 'lark_file' to the handle inside the 'with', shadowing the path).
    grammar_path = os.path.join(dirname(__file__), 'hcl2.lark')
    # Explicit encodings avoid depending on the platform's default locale.
    with open(grammar_path, 'r', encoding='utf-8') as grammar_file, \
            open(PARSER_FILE, 'w', encoding='utf-8') as parser_file:
        lark_inst = Lark(grammar_file.read(), parser="lalr", lexer="standard")
        data, memo = lark_inst.memo_serialize([TerminalDef, Rule])
        print(PARSER_FILE_TEMPLATE % (data, memo), file=parser_file)
# Generate the cached standalone parser module on first import.
if not exists(PARSER_FILE):
    create_parser_file()
# pylint: disable=wrong-import-position
# Lark_StandAlone needs to be imported after the above block of code because lark_parser.py might not exist
from hcl2.lark_parser import Lark_StandAlone
# Module-level parser instance with the dict transformer attached.
hcl2 = Lark_StandAlone(transformer=DictTransformer())
| {
"repo_name": "amplify-education/python-hcl2",
"path": "hcl2/parser.py",
"copies": "1",
"size": "2036",
"license": "mit",
"hash": -7207084062034242000,
"line_mean": 37.4150943396,
"line_max": 109,
"alpha_frac": 0.7357563851,
"autogenerated": false,
"ratio": 3.629233511586453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9853125394038583,
"avg_score": 0.0023729005295738083,
"num_lines": 53
} |
"""A parser for HTML and XHTML.
Backported for python-future from Python 3.3.
"""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
from future.backports import _markupbase
import re
import warnings
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Raw string: '\s' in a plain string literal is an invalid escape
# sequence (DeprecationWarning since Python 3.6, an error in newer
# versions). The compiled pattern is unchanged.
tagfind = re.compile(r'([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
# Raw string for the same reason; \t/\n/\r/\f/\x00 are interpreted
# identically by the regex engine.
tagfind_tolerant = re.compile(r'[a-zA-Z][^\t\n\r\f />\x00]*')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
#    correctly strict without breaking backward compatibility;
# 2) if you change attrfind remember to update locatestarttagend too;
# 3) if you change attrfind and/or locatestarttagend the parser will
#    explode, so don't do it.
attrfind = re.compile(
    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:[\s/]*                          # optional whitespace before attribute name
    (?:(?<=['"\s/])[^\s/>][^\s/=>]*  # attribute name
      (?:\s*=+\s*                    # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |"[^"]*"                   # LIT-enclosed value
          |(?!['"])[^>\s]*           # bare value
         )
         (?:\s*,)*                   # possibly followed by a comma
       )?(?:\s|/(?!>))*
     )*
   )?
  \s*                                # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
# Raw string (see tagfind above).
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    Carries the offending message plus an optional (line, column)
    position; either coordinate may be None when unknown.
    """
    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]
    def __str__(self):
        # Assemble "msg[, at line N][, column M]" from whatever
        # position information is available.
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # Column is reported 1-based.
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
class HTMLParser(_markupbase.ParserBase):
    """Find tags and other markup and call handler functions.
    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()
    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag(). The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks). Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument. Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """
    # Elements whose content is treated as CDATA (no markup parsed inside).
    CDATA_CONTENT_ELEMENTS = ("script", "style")
    def __init__(self, strict=False):
        """Initialize and reset this instance.
        If strict is set to False (the default) the parser will parse invalid
        markup, otherwise it will raise an error. Note that the strict mode
        is deprecated.
        """
        if strict:
            warnings.warn("The strict mode is deprecated.",
                          DeprecationWarning, stacklevel=2)
        self.strict = strict
        self.reset()
    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        _markupbase.ParserBase.reset(self)
    def feed(self, data):
        r"""Feed data to the parser.
        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)
    def close(self):
        """Handle any buffered data."""
        self.goahead(1)
    def error(self, message):
        # Raise a parse error annotated with the current (line, column).
        raise HTMLParseError(message, self.getpos())
    __starttag_text = None
    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text
    def set_cdata_mode(self, elem):
        # Inside <script>/<style> only the matching end tag is special.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None
    # Internal -- handle data as far as reasonable. May leave state
    # and data to be processed by a subsequent call. If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    if self.strict:
                        k = self.parse_declaration(i)
                    else:
                        k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # Construct is incomplete; wait for more data unless
                    # this is the final (EOF) pass.
                    if not end:
                        break
                    if self.strict:
                        self.error("EOF in middle of construct")
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]: # bail by consuming &#
                        # NOTE(review): these use absolute indices 0/2
                        # rather than i/i+2; later CPython versions use
                        # rawdata[i:i+2] -- confirm this path with i > 0
                        # before relying on it.
                        self.handle_data(rawdata[0:2])
                        i = self.updatepos(i, 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        if self.strict:
                            self.error("EOF in middle of entity or char ref")
                        else:
                            # NOTE(review): 'k' is not reassigned in this
                            # branch (upstream sets k = match.end() first),
                            # so a stale k from an earlier iteration may be
                            # tested here -- verify against CPython.
                            if k <= i:
                                k = n
                            i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]
    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == '<!', ('unexpected call to '
                                          'parse_html_declaration()')
        if rawdata[i:i + 4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i + 3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i + 9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i + 9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i + 2:gtpos])
            return gtpos + 1
        else:
            return self.parse_bogus_comment(i)
    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] in ('<!', '</'), ('unexpected call to '
                                                  'parse_comment()')
        pos = rawdata.find('>', i + 2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i + 2:pos])
        return pos + 1
    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i + 2) # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i + 2: j])
        j = match.end()
        return j
    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i + 1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                    attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip matching quotes around the attribute value.
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()
        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Junk between the last attribute and '>' -- compute the
            # position for error reporting, then fall back to data.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                    - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos
    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        if self.strict:
            m = locatestarttagend.match(rawdata, i)
        else:
            m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j + 1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if self.strict:
                    self.updatepos(i, j + 1)
                    self.error("malformed empty start tag")
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if self.strict:
                self.updatepos(i, j)
                self.error("malformed start tag")
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")
    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i + 1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                # Inside CDATA, a non-matching end tag is plain data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            if self.strict:
                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i + 2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i + 3] == '</>':
                    return i + 3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after tha name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos + 1
        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos
    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)
    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass
    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass
    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass
    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass
    # Overridable -- handle data
    def handle_data(self, data):
        pass
    # Overridable -- handle comment
    def handle_comment(self, data):
        pass
    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass
    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass
    def unknown_decl(self, data):
        # Non-strict mode silently ignores unknown declarations.
        if self.strict:
            self.error("unknown declaration: %r" % (data,))
    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        if '&' not in s:
            return s
        def replaceEntities(s):
            # Regex-substitution callback: s is the match object's group.
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    # Numeric character reference (decimal or hex).
                    s = s[1:]
                    if s[0] in ['x', 'X']:
                        c = int(s[1:].rstrip(';'), 16)
                    else:
                        c = int(s.rstrip(';'))
                    return chr(c)
            except ValueError:
                return '&#' + s
            else:
                # Named entity; imported lazily to avoid a module-level
                # dependency until it is actually needed.
                from future.backports.html.entities import html5
                if s in html5:
                    return html5[s]
                elif s.endswith(';'):
                    return '&' + s
                # Try progressively longer prefixes of the candidate name.
                for x in range(2, len(s)):
                    if s[:x] in html5:
                        return html5[s[:x]] + s[x:]
                else:
                    return '&' + s
        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                      replaceEntities, s)
| {
"repo_name": "thonkify/thonkify",
"path": "src/lib/future/backports/html/parser.py",
"copies": "1",
"size": "19849",
"license": "mit",
"hash": 5379010073604441000,
"line_mean": 35.9627560521,
"line_max": 80,
"alpha_frac": 0.487530858,
"autogenerated": false,
"ratio": 4.102728400165358,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5090259258165357,
"avg_score": null,
"num_lines": null
} |
"""A parser for HTML and XHTML."""
########
# This is copied from Python3 and the slightly modified to support needed
# features. The original file can be found at:
# https://github.com/python/cpython/blob/44b548dda872c0d4f30afd6b44fd74b053a55ad8/Lib/html/parser.py
#
# The largest difference is the reinstatment of the unescape method in
# HTMLParser, which is needed for features in htmlmin. Changes are also
# made to ensure Python2.7 compatability.
########
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import warnings
try:
import _markupbase as markupbase
except ImportError:
import markupbase
from . import unescape
__all__ = ['HTMLParser']
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Note:
# 1) if you change tagfind/attrfind remember to update locatestarttagend too;
# 2) if you change tagfind/attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). If convert_charrefs is
True the character references are converted automatically to the
corresponding Unicode character (and self.handle_data() is no
longer split in chunks), otherwise they are passed by calling
self.handle_entityref() or self.handle_charref() with the string
containing respectively the named or numeric reference as the
argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self, convert_charrefs=True):
"""Initialize and reset this instance.
If convert_charrefs is True (the default), all character references
are automatically converted to the corresponding Unicode characters.
"""
self.convert_charrefs = convert_charrefs
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.convert_charrefs and not self.cdata_elem:
j = rawdata.find('<', i)
if j < 0:
# if we can't find the next <, either we are at the end
# or there's more text incoming. If the latter is True,
# we can't pass the text to handle_data in case we have
# a charref cut in half at end. Try to determine if
# this is the case before proceeding by looking for an
# & near the end and see if it's followed by a space or ;.
amppos = rawdata.rfind('&', max(i, n-34))
if (amppos >= 0 and
not re.compile(r'[\s;]').search(rawdata, amppos)):
break # wait till we get all the text
j = n
else:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(self.unescape(rawdata[i:j]))
else:
self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(self.unescape(rawdata[i:k]))
else:
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: # bail by consuming &#
self.handle_data(rawdata[i:i+2])
i = self.updatepos(i, i+2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
k = match.end()
if k <= i:
k = n
i = self.updatepos(i, i + 1)
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(self.unescape(rawdata[i:n]))
else:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<!', ('unexpected call to '
'parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
'parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind_tolerant.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        """Return the index just past a complete start tag at *i*,
        or -1 when more input is required.  This tolerant variant never
        raises on bogus input; it falls through to returning j or i+1."""
        rawdata = self.rawdata
        m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                # NOTE(review): since next == "/" the startswith("/", j)
                # test above is always true, so this branch looks
                # unreachable -- confirm before relying on it.
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
gtpos = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_elem is not None:
self.handle_data(rawdata[i:gtpos])
return gtpos
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind_tolerant.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
tagname = namematch.group(1).lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
# </tag attr=">">, but looking for > after tha name should cover
# most of the cases and is much simpler
gtpos = rawdata.find('>', namematch.end())
self.handle_endtag(tagname)
return gtpos+1
elem = match.group(1).lower() # script or style
if self.cdata_elem is not None:
if elem != self.cdata_elem:
self.handle_data(rawdata[i:gtpos])
return gtpos
self.handle_endtag(elem.lower())
self.clear_cdata_mode()
return gtpos
    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        # Default: behave like a start tag immediately followed by its
        # end tag; subclasses may override for different semantics.
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)
    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass
    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass
    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass
    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass
    # Overridable -- handle data
    def handle_data(self, data):
        pass
    # Overridable -- handle comment
    def handle_comment(self, data):
        pass
    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass
    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass
    def unknown_decl(self, data):
        # Overridable -- called for declarations the parser does not
        # recognize; default is to ignore them.
        pass
    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        # Delegates to the module-level unescape() function (defined
        # elsewhere in this module; shadowed by this method name).
        return unescape(s)
| {
"repo_name": "mankyd/htmlmin",
"path": "htmlmin/python3html/parser.py",
"copies": "1",
"size": "18039",
"license": "bsd-3-clause",
"hash": -4397138810753088500,
"line_mean": 36.5031185031,
"line_max": 100,
"alpha_frac": 0.5105604524,
"autogenerated": false,
"ratio": 4.094189741261916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5104750193661916,
"avg_score": null,
"num_lines": null
} |
"""A parser for HTML and XHTML.
This code is a direct copy of Python 2.7.10 source.
It is intended for further customization as some special/adversarial cases
in web test are not handled by known parsers.
"""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
endtagclose = re.compile('</(?:\s*[a-zA-Z][-.a-zA-Z0-9:_]*|$)')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
# note: if you change tagfind/attrfind remember to update locatestarttagend too
tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
# this regex is currently unused, but left for backward compatibility
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
attrfind = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Raised for every parse error.

    Stores the error message plus an optional (line, column) pair
    describing where the problem was found.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        # Assemble "msg[, at line L][, column C]" from the known parts.
        pieces = [self.msg]
        if self.lineno is not None:
            pieces.append(", at line %d" % self.lineno)
        if self.offset is not None:
            pieces.append(", column %d" % (self.offset + 1))
        return "".join(pieces)
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()
    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        # 'interesting' selects which characters stop plain-data scanning;
        # in CDATA mode it is swapped for an end-tag matcher.
        self.interesting = interesting_normal
        self.cdata_elem = None
        markupbase.ParserBase.reset(self)
    def feed(self, data):
        r"""Feed data to the parser.
        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        # Unconsumed input accumulates in rawdata between calls.
        self.rawdata = self.rawdata + data
        self.goahead(0)
    def close(self):
        """Handle any buffered data."""
        self.goahead(1)
    def error(self, message):
        # Raise with the current (line, column) position attached.
        raise HTMLParseError(message, self.getpos())
    __starttag_text = None
    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text
    def set_cdata_mode(self, elem):
        # Inside <script>/<style>: only the matching end tag is interesting.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None
    # Internal -- handle data as far as reasonable. May leave state
    # and data to be processed by a subsequent call. If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        """Main scanning loop: consume rawdata, dispatching '<'-constructs
        and '&'-references to the parse_*/handle_* methods.  Leftover
        (incomplete) input is kept in self.rawdata for the next feed()."""
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    # in CDATA mode, wait for the matching end tag
                    break
                j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif endtagclose.match(rawdata, i): # </
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    # lone '<' followed by something uninteresting: data
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # construct incomplete; at EOF, flush it as data up to
                    # the next '>' (or '<', or a single character)
                    if not end:
                        break
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        # the terminator was a lookahead char, not ';'
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]: # bail by consuming '&#'
                        self.handle_data(rawdata[i:i+2])
                        i = self.updatepos(i, i+2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        self.error("EOF in middle of entity or char ref")
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            # EOF: flush whatever remains as data
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]
    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        """Dispatch a '<!' construct at *i* to the right sub-parser;
        return the index just past it or -1 if incomplete."""
        rawdata = self.rawdata
        if rawdata[i:i+2] != '<!':
            self.error('unexpected call to parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            # anything else after '<!' is a bogus comment
            return self.parse_bogus_comment(i)
    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        """Treat everything from *i* up to the next '>' as a comment.
        When *report* is true it is passed to handle_comment()."""
        rawdata = self.rawdata
        if rawdata[i:i+2] not in ('<!', '</'):
            self.error('unexpected call to parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at *i*; dispatch to the start-tag handlers
        and return the index just past it, or -1 if incomplete."""
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            m = attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                # attribute given without '=value'
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # strip matching surrounding quotes
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()
        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # junk after the last attribute: emit the whole tag as data.
            # NOTE(review): lineno/offset are computed but never used here
            # -- apparently left over from a removed error() call; confirm.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                # <script>/<style>: switch to CDATA mode until the end tag
                self.set_cdata_mode(tag)
        return endpos
    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        """Return the index just past a complete start tag at *i*, or -1
        when more input is needed.  Unlike the tolerant variant, raises
        via self.error() on a malformed empty start tag."""
        rawdata = self.rawdata
        m = locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")
    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        """Parse an end tag at *i*; call handle_endtag() and return the
        index just past the closing '>' (-1 if incomplete)."""
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            # not a well-formed end tag
            if self.cdata_elem is not None:
                # inside <script>/<style>: treat it as literal data
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group(1).lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1
        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                # some other element's end tag while in CDATA mode: data
                self.handle_data(rawdata[i:gtpos])
                return gtpos
        self.handle_endtag(elem)
        self.clear_cdata_mode()
        return gtpos
    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        # Default: behave like a start tag immediately followed by its end tag.
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)
    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass
    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass
    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass
    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass
    # Overridable -- handle data
    def handle_data(self, data):
        pass
    # Overridable -- handle comment
    def handle_comment(self, data):
        pass
    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass
    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass
    def unknown_decl(self, data):
        # Overridable -- called for unrecognized declarations; ignored here.
        pass
    # Internal -- helper to remove special character quoting
    # entitydefs is a lazily-built class-level cache, filled by unescape()
    entitydefs = None
    def unescape(self, s):
        """Replace entity and numeric character references in *s* with
        their characters.  (Python 2 only: relies on unichr/iteritems.)"""
        if '&' not in s:
            return s
        def replaceEntities(s):
            # *s* here is the regex match object from re.sub below
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:], 16)
                    else:
                        c = int(s)
                    return unichr(c)
            except ValueError:
                # malformed numeric reference: leave it as-is
                return '&#'+s+';'
            else:
                # Cannot use name2codepoint directly, because HTMLParser supports apos,
                # which is not part of HTML 4
                import htmlentitydefs
                if HTMLParser.entitydefs is None:
                    entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
                    for k, v in htmlentitydefs.name2codepoint.iteritems():
                        entitydefs[k] = unichr(v)
                try:
                    return self.entitydefs[s]
                except KeyError:
                    # unknown named entity: leave it as-is
                    return '&'+s+';'
        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
| {
"repo_name": "Livefyre/flaubert",
"path": "flaubert/HTMLParser.py",
"copies": "2",
"size": "17393",
"license": "mit",
"hash": -5723417758449017000,
"line_mean": 35.0103519669,
"line_max": 87,
"alpha_frac": 0.5058931754,
"autogenerated": false,
"ratio": 4.046765937645416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009343383142231224,
"num_lines": 483
} |
"""A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
# note: if you change tagfind/attrfind remember to update locatestarttagend too
tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
# this regex is currently unused, but left for backward compatibility
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
attrfind = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    *position* is a (line, column) pair; either element may be None
    when unknown.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        text = self.msg
        line = self.lineno
        col = self.offset
        if line is not None:
            text += ", at line %d" % line
        if col is not None:
            # columns are reported 1-based
            text += ", column %d" % (col + 1)
        return text
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: # bail by consuming '&#'
self.handle_data(rawdata[i:i+2])
i = self.updatepos(i, i+2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<!':
self.error('unexpected call to parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+2] not in ('<!', '</'):
self.error('unexpected call to parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
if j > i:
return j
else:
return i + 1
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
gtpos = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_elem is not None:
self.handle_data(rawdata[i:gtpos])
return gtpos
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
tagname = namematch.group(1).lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
# </tag attr=">">, but looking for > after tha name should cover
# most of the cases and is much simpler
gtpos = rawdata.find('>', namematch.end())
self.handle_endtag(tagname)
return gtpos+1
elem = match.group(1).lower() # script or style
if self.cdata_elem is not None:
if elem != self.cdata_elem:
self.handle_data(rawdata[i:gtpos])
return gtpos
self.handle_endtag(elem)
self.clear_cdata_mode()
return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
pass
# Internal -- helper to remove special character quoting
entitydefs = None
def unescape(self, s):
if '&' not in s:
return s
def replaceEntities(s):
s = s.groups()[0]
try:
if s[0] == "#":
s = s[1:]
if s[0] in ['x','X']:
c = int(s[1:], 16)
else:
c = int(s)
return unichr(c)
except ValueError:
return '&#'+s+';'
else:
# Cannot use name2codepoint directly, because HTMLParser supports apos,
# which is not part of HTML 4
import htmlentitydefs
if HTMLParser.entitydefs is None:
entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
for k, v in htmlentitydefs.name2codepoint.iteritems():
entitydefs[k] = unichr(v)
try:
return self.entitydefs[s]
except KeyError:
return '&'+s+';'
return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
| {
"repo_name": "franekp/millandict",
"path": "ankidict/thirdparty/bs4/builder/HTMLParser.py",
"copies": "2",
"size": "17138",
"license": "mit",
"hash": -3610825267012324000,
"line_mean": 34.928721174,
"line_max": 87,
"alpha_frac": 0.5027424437,
"autogenerated": false,
"ratio": 4.068850902184235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030097702427039166,
"num_lines": 477
} |
"""A parser for HTML and XHTML."""
# This file is a modified Version from htmlparser.HTMLParser
# in this version tag and attribute names aren't converted to
# lowercase
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
from __future__ import unicode_literals
from builtins import chr
try:
import markupbase
except ImportError:
import _markupbase as markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors."""

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        # Build the optional location suffix, then prepend the message.
        suffix = ""
        if self.lineno is not None:
            suffix += ", at line %d" % self.lineno
        if self.offset is not None:
            suffix += ", column %d" % (self.offset + 1)
        return self.msg + suffix
class HTMLParser(markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    # Elements whose content is treated as CDATA (no markup except the
    # matching end tag).
    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        """Raise HTMLParseError annotated with the current position."""
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self):
        # From here on only '</' (or EOF) ends the interesting region.
        self.interesting = interesting_cdata

    def clear_cdata_mode(self):
        self.interesting = interesting_normal

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i)  # < or &
            if match:
                j = match.start()
            else:
                j = n
            if i < j:
                self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n:
                break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i):  # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # Construct is incomplete; wait for more data unless
                    # we are at EOF.
                    if end:
                        self.error("EOF in middle of construct")
                    break
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming '&#'
                        # BUGFIX: the original sliced rawdata[0:2] and
                        # rewound to absolute offset 2 instead of slicing
                        # at i, corrupting output whenever a malformed
                        # character reference appeared past the start of
                        # the buffer (fixed the same way upstream CPython
                        # did: use i-relative offsets).
                        self.handle_data(rawdata[i:i + 2])
                        i = self.updatepos(i, i + 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        self.error("EOF in middle of entity or char ref")
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i + 2)  # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i + 2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i + 1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i + 1:k]

        while k < endpos:
            m = attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip the quotes, then resolve entity/char references.
                attrvalue = attrvalue[1:-1]
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname, attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.error("junk characters in start tag: %r"
                       % (rawdata[k:endpos][:20],))
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode()
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        m = locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j + 1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            self.updatepos(i, j)
            self.error("malformed start tag")
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i + 1)  # >
        if not match:
            return -1
        j = match.end()
        match = endtagfind.match(rawdata, i)  # </ + tag + >
        if not match:
            self.error("bad end tag: %r" % (rawdata[i:j],))
        tag = match.group(1)
        self.handle_endtag(tag)
        self.clear_cdata_mode()
        return j

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        self.error("unknown declaration: %r" % (data,))

    # Internal -- helper to remove special character quoting.
    # Lazily-built class-level cache of entity name -> character.
    entitydefs = None

    def unescape(self, s):
        if '&' not in s:
            return s

        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x', 'X']:
                        c = int(s[1:], 16)
                    else:
                        c = int(s)
                    return chr(c)
            except ValueError:
                return '&#' + s + ';'
            else:
                # Cannot use name2codepoint directly, because HTMLParser
                # supports apos, which is not part of HTML 4
                import htmlentitydefs
                if HTMLParser.entitydefs is None:
                    # NOTE(review): this repo uses chr() where the stdlib
                    # used unichr(); on Python 2, chr() raises ValueError
                    # for code points > 255 -- confirm this is intended.
                    entitydefs = HTMLParser.entitydefs = {'apos': u"'"}
                    for k, v in htmlentitydefs.name2codepoint.iteritems():
                        entitydefs[k] = chr(v)
                try:
                    return self.entitydefs[s]
                except KeyError:
                    return '&' + s + ';'

        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));",
                      replaceEntities, s)
| {
"repo_name": "ernw/python-wcfbin",
"path": "wcf/MyHTMLParser.py",
"copies": "2",
"size": "13903",
"license": "bsd-3-clause",
"hash": -3600152253876996600,
"line_mean": 33.5845771144,
"line_max": 87,
"alpha_frac": 0.5019779904,
"autogenerated": false,
"ratio": 4.143964232488822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010178488888164764,
"num_lines": 402
} |
"""A parser for HTML and XHTML.
Backported for python-future from Python 3.3.
"""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
from future.backports import _markupbase
import re
import warnings
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
# correctly strict without breaking backward compatibility;
# 2) if you change attrfind remember to update locatestarttagend too;
# 3) if you change attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    The optional position argument is a (lineno, offset) pair; either
    element may be None when that coordinate is unknown.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        location = ""
        if self.lineno is not None:
            location += ", at line %d" % self.lineno
        if self.offset is not None:
            # Offsets are 0-based internally; humans count columns from 1.
            location += ", column %d" % (self.offset + 1)
        return self.msg + location
class HTMLParser(_markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    # Elements whose content is CDATA: only the matching end tag is markup.
    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self, strict=False):
        """Initialize and reset this instance.

        If strict is set to False (the default) the parser will parse invalid
        markup, otherwise it will raise an error.  Note that the strict mode
        is deprecated.
        """
        if strict:
            warnings.warn("The strict mode is deprecated.",
                          DeprecationWarning, stacklevel=2)
        self.strict = strict
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        _markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        """Raise HTMLParseError annotated with the current position."""
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        # Inside <script>/<style> the only interesting token is the
        # matching close tag, so build a specific regex for it.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i)  # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
            if i < j:
                self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n:
                break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i):  # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    if self.strict:
                        k = self.parse_declaration(i)
                    else:
                        k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    if not end:
                        break
                    if self.strict:
                        self.error("EOF in middle of construct")
                    # Tolerant EOF recovery: flush up to the next '>' (or
                    # '<', or a single character) as plain data.
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming '&#'
                        # BUGFIX: the backported code sliced rawdata[0:2]
                        # and rewound to absolute offset 2 instead of
                        # using i-relative offsets, corrupting output when
                        # a malformed character reference appeared past
                        # the start of the buffer (fixed as in current
                        # CPython html.parser).
                        self.handle_data(rawdata[i:i + 2])
                        i = self.updatepos(i, i + 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        if self.strict:
                            self.error("EOF in middle of entity or char ref")
                        else:
                            # NOTE(review): 'k' here is left over from an
                            # earlier branch; this mirrors upstream
                            # CPython 3.3 behavior.
                            if k <= i:
                                k = n
                            i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]

    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == '<!', ('unexpected call to '
                                          'parse_html_declaration()')
        if rawdata[i:i + 4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i + 3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i + 9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i + 9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i + 2:gtpos])
            return gtpos + 1
        else:
            return self.parse_bogus_comment(i)

    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] in ('<!', '</'), ('unexpected call to '
                                                  'parse_comment()')
        pos = rawdata.find('>', i + 2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i + 2:pos])
        return pos + 1

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i + 2)  # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i + 2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i + 1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()

        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            # Tolerant mode: emit the whole tag as data and move on.
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        if self.strict:
            m = locatestarttagend.match(rawdata, i)
        else:
            m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j + 1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if self.strict:
                    self.updatepos(i, j + 1)
                    self.error("malformed empty start tag")
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if self.strict:
                self.updatepos(i, j)
                self.error("malformed start tag")
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i + 1)  # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i)  # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            if self.strict:
                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i + 2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i + 3] == '</>':
                    return i + 3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after tha name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos + 1

        elem = match.group(1).lower()  # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                self.handle_data(rawdata[i:gtpos])
                return gtpos

        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        if self.strict:
            self.error("unknown declaration: %r" % (data,))

    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        if '&' not in s:
            return s

        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x', 'X']:
                        c = int(s[1:].rstrip(';'), 16)
                    else:
                        c = int(s.rstrip(';'))
                    return chr(c)
            except ValueError:
                return '&#' + s
            else:
                from future.backports.html.entities import html5
                if s in html5:
                    return html5[s]
                elif s.endswith(';'):
                    return '&' + s
                # Longest-prefix match against the HTML5 entity table.
                for x in range(2, len(s)):
                    if s[:x] in html5:
                        return html5[s[:x]] + s[x:]
                else:
                    return '&' + s

        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                      replaceEntities, s)
| {
"repo_name": "agincel/AdamTestBot",
"path": "future/backports/html/parser.py",
"copies": "2",
"size": "20308",
"license": "mit",
"hash": -7757847978496462000,
"line_mean": 35.8175046555,
"line_max": 80,
"alpha_frac": 0.4765117195,
"autogenerated": false,
"ratio": 4.170876976791949,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.564738869629195,
"avg_score": null,
"num_lines": null
} |
"""A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
# note: if you change tagfind/attrfind remember to update locatestarttagend too
tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
# this regex is currently unused, but left for backward compatibility
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
attrfind = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    Attributes:
        msg: the error message (always non-empty).
        lineno: 1-based line number of the error, or None.
        offset: 0-based column of the error, or None.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno, self.offset = position[0], position[1]

    def __str__(self):
        text = self.msg
        if self.lineno is not None:
            text = text + ", at line %d" % self.lineno
        if self.offset is not None:
            # Report the column 1-based for human consumption.
            text = text + ", column %d" % (self.offset + 1)
        return text
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: # bail by consuming '&#'
self.handle_data(rawdata[i:i+2])
i = self.updatepos(i, i+2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<!':
self.error('unexpected call to parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+2] not in ('<!', '</'):
self.error('unexpected call to parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
if j > i:
return j
else:
return i + 1
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
    """Parse an end tag at index *i*; return the index past it or -1
    when the buffer does not yet contain the whole tag."""
    rawdata = self.rawdata
    assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
    match = endendtag.search(rawdata, i+1) # >
    if not match:
        return -1
    gtpos = match.end()
    match = endtagfind.match(rawdata, i) # </ + tag + >
    if not match:
        # Not a well-formed </tag>.  Inside a CDATA element
        # (script/style) the text is passed through as data.
        # NOTE(review): cdata_elem and parse_bogus_comment are defined
        # outside this view -- confirm their contracts there.
        if self.cdata_elem is not None:
            self.handle_data(rawdata[i:gtpos])
            return gtpos
        # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
        namematch = tagfind.match(rawdata, i+2)
        if not namematch:
            # w3.org/TR/html5/tokenization.html#end-tag-open-state
            if rawdata[i:i+3] == '</>':
                return i+3
            else:
                return self.parse_bogus_comment(i)
        tagname = namematch.group(1).lower()
        # consume and ignore other stuff between the name and the >
        # Note: this is not 100% correct, since we might have things like
        # </tag attr=">">, but looking for > after the name should cover
        # most of the cases and is much simpler
        gtpos = rawdata.find('>', namematch.end())
        self.handle_endtag(tagname)
        return gtpos+1
    elem = match.group(1).lower() # script or style
    if self.cdata_elem is not None:
        if elem != self.cdata_elem:
            # A different closing tag inside CDATA is literal text.
            self.handle_data(rawdata[i:gtpos])
            return gtpos
    self.handle_endtag(elem)
    self.clear_cdata_mode()
    return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
    """Handle an XHTML-style empty tag; the default treats it as a
    start tag immediately followed by the matching end tag."""
    self.handle_starttag(tag, attrs)
    self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
    """Called for each start tag.  *attrs* is a list of
    (lowercased-name, value) pairs; value is None for valueless
    attributes.  Default: do nothing."""
    pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
    """Called for each end tag with the lowercased tag name.
    Default: do nothing."""
    pass
# Overridable -- handle character reference
def handle_charref(self, name):
    """Called for numeric character references; *name* is presumably
    the text between '&#' and ';' (per the stdlib HTMLParser
    contract -- the calling code is outside this view).  Default:
    do nothing."""
    pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
    """Called for named entity references with the entity name
    (e.g. 'amp').  Default: do nothing."""
    pass
# Overridable -- handle data
def handle_data(self, data):
    """Called with runs of text between markup; may receive the data
    in arbitrary chunks.  Default: do nothing."""
    pass
# Overridable -- handle comment
def handle_comment(self, data):
    """Called with the text of a comment (without the <!-- -->
    delimiters).  Default: do nothing."""
    pass
# Overridable -- handle declaration
def handle_decl(self, decl):
    """Called with the contents of a <!...> declaration.
    Default: do nothing."""
    pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
    """Called with the contents of a <?...> processing instruction.
    Default: do nothing."""
    pass
def unknown_decl(self, data):
    """Called for declarations the base parser does not recognize.
    Default: silently ignore."""
    pass
# Internal -- helper to remove special character quoting
# Lazily-built entity table, cached on the class (None until first use).
entitydefs = None

def unescape(self, s):
    """Replace entity and character references in *s* with their
    characters.

    NOTE(review): Python 2 only -- uses unichr, dict.iteritems and
    the htmlentitydefs module; kept verbatim because this file is
    the Python 2.7 stdlib HTMLParser.
    """
    if '&' not in s:
        return s
    def replaceEntities(s):
        s = s.groups()[0]
        try:
            if s[0] == "#":
                # Numeric reference: decimal (&#65;) or hex (&#x41;).
                s = s[1:]
                if s[0] in ['x','X']:
                    c = int(s[1:], 16)
                else:
                    c = int(s)
                return unichr(c)
        except ValueError:
            # Not a valid number -- leave the reference as-is.
            return '&#'+s+';'
        else:
            # Cannot use name2codepoint directly, because HTMLParser supports apos,
            # which is not part of HTML 4
            import htmlentitydefs
            if HTMLParser.entitydefs is None:
                # Build the table once and cache it on the class.
                entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
                for k, v in htmlentitydefs.name2codepoint.iteritems():
                    entitydefs[k] = unichr(v)
            try:
                return self.entitydefs[s]
            except KeyError:
                # Unknown entity name -- leave the reference as-is.
                return '&'+s+';'
    return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
| {
"repo_name": "google/google-ctf",
"path": "third_party/edk2/AppPkg/Applications/Python/Python-2.7.10/Lib/HTMLParser.py",
"copies": "5",
"size": "17611",
"license": "apache-2.0",
"hash": -6723485861488223000,
"line_mean": 35.0757894737,
"line_max": 87,
"alpha_frac": 0.4892396797,
"autogenerated": false,
"ratio": 4.162372961474829,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7151612641174829,
"avg_score": null,
"num_lines": null
} |
"""A parser for HTML and XHTML."""
# This file is a modified Version from htmlparser.HTMLParser
# in this version tag and attribute names aren't converted to
# lowercase
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
from __future__ import unicode_literals
from builtins import chr
try:
import markupbase
except ImportError:
import _markupbase as markupbase
import re
# Regular expressions used for parsing

interesting_normal = re.compile('[&<]')      # next markup or reference
interesting_cdata = re.compile(r'<(/|\Z)')   # inside <script>/<style>
incomplete = re.compile('&[a-zA-Z#]')        # possibly-truncated reference

entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')

starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')

locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# BUGFIX: made this a raw string -- '\s' in a plain string literal is an
# invalid escape sequence (SyntaxWarning on modern Python).  The compiled
# pattern is unchanged.
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    Carries the message plus an optional (lineno, offset) position;
    either coordinate may be None when unknown.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        # Assemble "msg[, at line L][, column C]" -- column is 1-based.
        pieces = [self.msg]
        if self.lineno is not None:
            pieces.append(", at line %d" % self.lineno)
        if self.offset is not None:
            pieces.append(", column %d" % (self.offset + 1))
        return "".join(pieces)
class HTMLParser(markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.

    Unlike the stdlib parser this variant preserves the case of tag
    and attribute names.
    """

    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        # All fatal parse problems funnel through here.
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self):
        # Inside <script>/<style> only an end tag is interesting.
        self.interesting = interesting_cdata

    def clear_cdata_mode(self):
        self.interesting = interesting_normal

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i)  # < or &
            if match:
                j = match.start()
            else:
                j = n
            if i < j:
                self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n:
                break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i):  # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    if end:
                        self.error("EOF in middle of construct")
                    break
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming &#
                        # NOTE(review): later CPython versions use
                        # rawdata[i:i+2] / updatepos(i, i+2) here; kept
                        # as-is to preserve this module's behavior.
                        self.handle_data(rawdata[0:2])
                        i = self.updatepos(i, 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k - 1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        self.error("EOF in middle of entity or char ref")
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2)  # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        # Tag case preserved (this module's deliberate modification).
        self.lasttag = tag = rawdata[i+1:k]
        while k < endpos:
            m = attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                    attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                # BUGFIX: guard restored from upstream HTMLParser -- a
                # valueless attribute (rest is None) previously reached
                # unescape(None) and raised TypeError.
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname, attrvalue))
            k = m.end()
        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                    - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.error("junk characters in start tag: %r"
                       % (rawdata[k:endpos][:20],))
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode()
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        m = locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            self.updatepos(i, j)
            self.error("malformed start tag")
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1)  # >
        if not match:
            return -1
        j = match.end()
        match = endtagfind.match(rawdata, i)  # </ + tag + >
        if not match:
            self.error("bad end tag: %r" % (rawdata[i:j],))
        tag = match.group(1)
        self.handle_endtag(tag)
        self.clear_cdata_mode()
        return j

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        self.error("unknown declaration: %r" % (data,))

    # Internal -- helper to remove special character quoting
    # Lazily-built entity table, cached on the class (None until built).
    entitydefs = None

    def unescape(self, s):
        """Replace entity and character references in *s* with their
        characters; unknown references are left untouched."""
        if '&' not in s:
            return s

        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    # Numeric reference: decimal (&#65;) or hex (&#x41;).
                    s = s[1:]
                    if s[0] in ['x', 'X']:
                        c = int(s[1:], 16)
                    else:
                        c = int(s)
                    return chr(c)
            except ValueError:
                return '&#' + s + ';'
            else:
                # Cannot use name2codepoint directly, because HTMLParser
                # supports apos, which is not part of HTML 4.
                # BUGFIX: the old code used the Python-2-only
                # htmlentitydefs module and dict.iteritems(), breaking on
                # Python 3 even though this module targets both versions
                # (see the _markupbase import fallback).
                try:
                    from htmlentitydefs import name2codepoint  # Python 2
                except ImportError:
                    from html.entities import name2codepoint   # Python 3
                if HTMLParser.entitydefs is None:
                    entitydefs = HTMLParser.entitydefs = {'apos': u"'"}
                    for k, v in name2codepoint.items():
                        entitydefs[k] = chr(v)
                try:
                    return self.entitydefs[s]
                except KeyError:
                    return '&' + s + ';'
        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
| {
"repo_name": "InLefter/Air_Of_China",
"path": "wcf/MyHTMLParser.py",
"copies": "1",
"size": "14305",
"license": "mit",
"hash": 253369496792622620,
"line_mean": 33.5845771144,
"line_max": 87,
"alpha_frac": 0.4878713736,
"autogenerated": false,
"ratio": 4.238518518518519,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5226389892118518,
"avg_score": null,
"num_lines": null
} |
"""A parser for HTML."""
# This file is derived from sgmllib.py, which is part of Python.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import string
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('(&[a-zA-Z][-.a-zA-Z0-9]*|&#[0-9]*)')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[a-zA-Z]')
piopen = re.compile(r'<\?')
piclose = re.compile('>')
endtagopen = re.compile('</')
declopen = re.compile('<!')
special = re.compile('<![^<>]*>')
commentopen = re.compile('<!--')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./:;+*%?!&$\(\)_#=~]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endstarttag = re.compile(r"\s*/?>")
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
declname = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*')
declstringlit = re.compile(r'(\'[^\']*\'|"[^"]*")\s*')
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    *position* is an optional (lineno, offset) pair; each coordinate
    may be None when not known.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        # Build "msg[, at line L][, column C]"; the column shown to the
        # user is 1-based while self.offset is 0-based.
        text = self.msg
        if self.lineno is not None:
            text += ", at line %d" % self.lineno
        if self.offset is not None:
            text += ", column %d" % (self.offset + 1)
        return text
# HTML parser class -- find tags and call handler functions.
# Usage:
#
# p = HTMLParser(); p.feed(data); ...; p.close()
# Start tags are handled by calling self.handle_starttag() or
# self.handle_startendtag(); end tags by self.handle_endtag(). The
# data between tags is passed from the parser to the derived class by
# calling self.handle_data() with the data as argument (the data may
# be split up in arbitrary chunks). Entity references are passed by
# calling self.handle_entityref() with the entity reference as the
# argument. Numeric character references are passed to
# self.handle_charref() with the string containing the reference as
# the argument.
class HTMLParser:
CDATA_CONTENT_ELEMENTS = ("script", "style")
# Interface -- initialize and reset this instance
def __init__(self):
self.reset()
# Interface -- reset this instance. Loses all unprocessed data
def reset(self):
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.lineno = 1
self.offset = 0
self.interesting = interesting_normal
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
def feed(self, data):
self.rawdata = self.rawdata + data
self.goahead(0)
# Interface -- handle the remaining data
def close(self):
self.goahead(1)
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = string.count(rawdata, "\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = string.rindex(rawdata, "\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
# Interface -- return current line number and offset.
def getpos(self):
return self.lineno, self.offset
__starttag_text = None
# Interface -- return full source of start tag: "<...>"
def get_starttag_text(self):
return self.__starttag_text
def set_cdata_mode(self, tag):
self.interesting = re.compile(r'<(/%s|\Z)'%tag)
def clear_cdata_mode(self):
self.interesting = interesting_normal
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif endtagopen.match(rawdata, i): # </
k = self.parse_endtag(i)
if k >= 0:
self.clear_cdata_mode()
elif commentopen.match(rawdata, i): # <!--
k = self.parse_comment(i)
elif piopen.match(rawdata, i): # <?
k = self.parse_pi(i)
elif declopen.match(rawdata, i): # <!
k = self.parse_declaration(i)
else:
if i < n-1:
raise HTMLParseError(
"invalid '<' construct: %s" % `rawdata[i:i+2]`,
self.getpos())
k = -1
if k < 0:
if end:
raise HTMLParseError("EOF in middle of construct",
self.getpos())
break
i = self.updatepos(i, k)
elif rawdata[i] == '&':
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
k = match.end()
if rawdata[k-1] != ';':
k = k-1
i = self.updatepos(i, k)
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if rawdata[k-1] != ';':
k = k-1
i = self.updatepos(i, k)
continue
if incomplete.match(rawdata, i):
if end:
raise HTMLParseError(
"EOF in middle of entity or char ref",
self.getpos())
return -1 # incomplete
#raise HTMLParseError("'&' not part of entity or char ref",
# self.getpos())
# people seem to be fond of bare '&', so skip it
i = self.updatepos(i, i+1)
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse comment, return end or -1 if not terminated
def parse_comment(self, i):
rawdata = self.rawdata
assert rawdata[i:i+4] == '<!--', 'unexpected call to parse_comment()'
match = commentclose.search(rawdata, i+4)
if not match:
return -1
j = match.start()
self.handle_comment(rawdata[i+4: j])
j = match.end()
return j
# Internal -- parse declaration.
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# in practice, this should look like: ((name|stringlit) S*)+ '>'
n = len(rawdata)
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
self.handle_decl(rawdata[i+2:j])
return j + 1
if c in "\"'":
m = declstringlit.match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
m = declname.match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
else:
raise HTMLParseError(
"unexpected char in declaration: %s" % `rawdata[j]`,
self.getpos())
return -1 # incomplete
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = string.lower(rawdata[i+1:k])
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrvalue = self.unescape(attrvalue)
attrs.append((string.lower(attrname), attrvalue))
k = m.end()
end = string.strip(rawdata[k:endpos])
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + string.count(self.__starttag_text, "\n")
offset = len(self.__starttag_text) \
- string.rfind(self.__starttag_text, "\n")
else:
offset = offset + len(self.__starttag_text)
raise HTMLParseError("junk characters in start tag: %s"
% `rawdata[k:endpos][:20]`,
(lineno, offset))
if end[-2:] == '/>':
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
s = rawdata[j:j+2]
if s == "/>":
return j + 2
if s == "/":
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
raise HTMLParseError("malformed empty start tag",
self.getpos())
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
self.updatepos(i, j)
raise HTMLParseError("malformed start tag", self.getpos())
raise AssertionError("we should not gt here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
j = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
raise HTMLParseError("bad end tag: %s" % `rawdata[i:j]`,
self.getpos())
tag = match.group(1)
self.handle_endtag(string.lower(tag))
return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
# Internal -- helper to remove special character quoting
def unescape(self, s):
if '&' not in s:
return s
s = string.replace(s, "<", "<")
s = string.replace(s, ">", ">")
s = string.replace(s, "'", "'")
s = string.replace(s, """, '"')
s = string.replace(s, "&", "&") # Must be last
return s
| {
"repo_name": "NGSegovia/wsgi-intercept",
"path": "wsgi_intercept/webunit_intercept/HTMLParser.py",
"copies": "5",
"size": "15724",
"license": "mit",
"hash": 4301583849333503000,
"line_mean": 34.8177676538,
"line_max": 77,
"alpha_frac": 0.5062325108,
"autogenerated": false,
"ratio": 4.038007190549563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001408701521120403,
"num_lines": 439
} |
'''A parser for redis messages
'''
from itertools import starmap
# RESP wire encodings for a null bulk string and a null array.
nil = b'$-1\r\n'
null_array = b'*-1\r\n'
# First byte of every RESP reply identifies its type.
# NOTE(review): "REPLAY" looks like a typo for "REPLY", but the name is
# part of this module's public surface, so it is kept as-is.
REPLAY_TYPE = frozenset((b'$',  # REDIS_REPLY_STRING,
                         b'*',  # REDIS_REPLY_ARRAY,
                         b':',  # REDIS_REPLY_INTEGER,
                         b'+',  # REDIS_REPLY_STATUS,
                         b'-'))  # REDIS_REPLY_ERROR
class String:
    """Decoder task for a RESP bulk string of known byte length."""
    __slots__ = ('_length', 'next')

    def __init__(self, length, next):
        self._length = length
        self.next = next

    def decode(self, parser, result):
        """Try to slice the bulk payload out of parser._inbuffer.

        Returns the payload (bytes, or str when parser.encoding is
        set), False when more input is needed (the task is re-armed as
        parser._current), or None for the nil bulk string ($-1).
        """
        parser._current = None
        size = self._length
        if size < 0:
            # nil bulk string -> None
            return None
        buf = parser._inbuffer
        if len(buf) < size + 2:
            # payload + trailing CRLF not fully buffered yet
            parser._current = self
            return False
        parser._inbuffer = buf[size + 2:]
        payload = bytes(buf[:size])
        return payload.decode(parser.encoding) if parser.encoding else payload
class ArrayTask:
    """Decoder task for a RESP array (*<n>), accumulating n replies."""
    __slots__ = ('_length', '_response', 'next')

    def __init__(self, length, next):
        self._length = length
        self._response = []
        self.next = next

    def decode(self, parser, result):
        """Collect elements until the array is complete.

        Returns the list of elements once all are decoded, False when
        more input is needed (re-arming parser._current), or None for
        the null array (*-1).
        """
        parser._current = None
        wanted = self._length
        if wanted < 0:
            # null array -> None
            return None
        items = self._response
        if result is not False:
            # a previously-suspended element just finished decoding
            items.append(result)
        while len(items) < wanted:
            element = parser._get(self)
            if element is False:
                break
            items.append(element)
        if len(items) == wanted:
            parser._current = None
            return items
        if not parser._current:
            # no nested task is pending; resume with this array next time
            parser._current = self
        return False
class Parser:
    '''A python parser for redis.'''
    # Output encoding; set from the connection in on_connect().  When
    # None, bulk strings are returned as bytes.
    encoding = None

    def __init__(self, protocolError, responseError):
        # Exception factories supplied by the caller: protocolError for
        # malformed streams, responseError for redis "-" error replies.
        self.protocolError = protocolError
        self.responseError = responseError
        self._current = None            # suspended decode task, if any
        self._inbuffer = bytearray()    # raw bytes not yet consumed

    def on_connect(self, connection):
        if connection.decode_responses:
            self.encoding = connection.encoding

    def on_disconnect(self):
        pass

    def feed(self, buffer):
        '''Feed new data into the buffer'''
        self._inbuffer.extend(buffer)

    def get(self):
        '''Called by the protocol consumer'''
        # Resume a partially-decoded reply if one is pending, otherwise
        # start a fresh one.  Returns False when more input is needed.
        if self._current:
            return self._resume(self._current, False)
        else:
            return self._get(None)

    def bulk(self, value):
        # Encode *value* (bytes or None) as a RESP bulk string.
        if value is None:
            return nil
        else:
            return ('$%d\r\n' % len(value)).encode('utf-8') + value + b'\r\n'

    def multi_bulk_len(self, len):
        # NOTE(review): parameter name shadows the builtin len(); kept
        # for interface compatibility.
        return ('*%s\r\n' % len).encode('utf-8')

    def multi_bulk(self, args):
        '''Multi bulk encoding for list/tuple ``args``
        '''
        return null_array if args is None else b''.join(self._pack(args))

    def pack_command(self, args):
        '''Encode a command to send to the server.

        Used by redis clients
        '''
        return b''.join(self._pack_command(args))

    def pack_pipeline(self, commands):
        '''Packs pipeline commands into bytes.'''
        # Each element of *commands* is a (args, options) pair; only the
        # args are encoded.
        return b''.join(
            starmap(lambda *args: b''.join(self._pack_command(args)),
                    (a for a, _ in commands)))

    # INTERNALS

    def _pack_command(self, args):
        # Yield the RESP encoding of one command; every argument is sent
        # as a bulk string (str/other values are utf-8 encoded first).
        crlf = b'\r\n'
        yield ('*%d\r\n' % len(args)).encode('utf-8')
        for value in args:
            if isinstance(value, str):
                value = value.encode('utf-8')
            elif not isinstance(value, bytes):
                value = str(value).encode('utf-8')
            yield ('$%d\r\n' % len(value)).encode('utf-8')
            yield value
            yield crlf

    def _pack(self, args):
        # General-purpose encoder: handles None, bytes, str, mappings
        # (flattened via _lua_dict), nested sequences and scalars.
        crlf = b'\r\n'
        yield ('*%d\r\n' % len(args)).encode('utf-8')
        for value in args:
            if value is None:
                yield nil
            elif isinstance(value, bytes):
                yield ('$%d\r\n' % len(value)).encode('utf-8')
                yield value
                yield crlf
            elif isinstance(value, str):
                value = value.encode('utf-8')
                yield ('$%d\r\n' % len(value)).encode('utf-8')
                yield value
                yield crlf
            elif hasattr(value, 'items'):
                for value in self._pack(tuple(self._lua_dict(value))):
                    yield value
            elif hasattr(value, '__len__'):
                for value in self._pack(value):
                    yield value
            else:
                value = str(value).encode('utf-8')
                yield ('$%d\r\n' % len(value)).encode('utf-8')
                yield value
                yield crlf

    def _lua_dict(self, d):
        # Lua-style table: yield values at consecutive integer keys
        # starting from 1 until the first missing key.
        index = 0
        while True:
            index += 1
            v = d.get(index)
            if v is None:
                break
            yield v

    def _get(self, next):
        # Decode one reply from the buffer.  Returns the decoded value,
        # or False when the buffer does not yet hold a complete reply.
        b = self._inbuffer
        length = b.find(b'\r\n')
        if length >= 0:
            self._inbuffer, response = b[length+2:], bytes(b[:length])
            rtype, response = response[:1], response[1:]
            if rtype == b'-':
                return self.responseError(response.decode('utf-8'))
            elif rtype == b':':
                return int(response)
            elif rtype == b'+':
                return response
            elif rtype == b'$':
                task = String(int(response), next)
                return task.decode(self, False)
            elif rtype == b'*':
                task = ArrayTask(int(response), next)
                return task.decode(self, False)
            else:
                # Clear the buffer and raise
                self._inbuffer = bytearray()
                raise self.protocolError('Protocol Error')
        else:
            return False

    def buffer(self):
        '''Current buffer'''
        return bytes(self._inbuffer)

    def _resume(self, task, result):
        # Re-enter a suspended task; once it completes, feed the value
        # to its parent task so nested arrays are assembled correctly.
        result = task.decode(self, result)
        if result is not False and task.next:
            return self._resume(task.next, result)
        else:
            return result
| {
"repo_name": "dejlek/pulsar",
"path": "pulsar/apps/ds/pyparser.py",
"copies": "1",
"size": "6342",
"license": "bsd-3-clause",
"hash": -6504797495584087000,
"line_mean": 30.2413793103,
"line_max": 77,
"alpha_frac": 0.4886471145,
"autogenerated": false,
"ratio": 4.25923438549362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 203
} |
"""A parser for SGML, using the derived class as a static DTD."""
# Stolen from the Python 2.0 distribution and tweaked by JMT
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import string
# Regular expressions used for parsing

interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
                        '<([a-zA-Z][^<>]*|'
                        '/([a-zA-Z][^<>]*)?|'
                        '![^<>]*)?')

entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*);')
charref = re.compile('&#([0-9]+)[^0-9];')

starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
# BUGFIX: made this a raw string -- '\?' in a plain string literal is an
# invalid escape sequence (SyntaxWarning on modern Python).  The
# compiled pattern is unchanged.
piopen = re.compile(r'<\?')
piclose = re.compile('>')
endtagopen = re.compile('</[<>a-zA-Z]')
endbracket = re.compile('[<>]')
special = re.compile('<![^<>]*>')
commentopen = re.compile('<!--')
commentclose = re.compile('--[%s]*>' % string.whitespace)
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9]*')
attrfind = re.compile(
    '[%s]*([a-zA-Z_][-.a-zA-Z_0-9]*)' % string.whitespace
    + ('([%s]*=[%s]*' % (string.whitespace, string.whitespace))
    + r'(\'[^\'<>]*[\'<>]|"[^"<>]*["<>]|[-a-zA-Z0-9@./:+*%?!&$\(\)_#=~,;\|]*))?')
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser:
    """SGML parser base class (Python 2.0-era sgmllib, tweaked for ekhtml).

    Derived classes act as a static DTD: define start_foo/end_foo (or
    do_foo) methods to handle <foo>...</foo> tags; tag names are
    lower-cased first.  Text between tags goes to handle_data(), entity
    references to handle_entityref(), and so on (see module comments).
    Note: attributes are collected as 3-tuples (name, value, qchar),
    where qchar is the quote character used (or '\\0' if unquoted).
    """

    # Interface -- initialize and reset this instance
    def __init__(self, verbose=0):
        self.verbose = verbose
        self.reset()

    # Interface -- reset this instance.  Loses all unprocessed data
    def reset(self):
        self.rawdata = ''          # unprocessed input buffered between feed() calls
        self.stack = []            # currently open (balanced) tags
        self.lasttag = '???'       # most recent start tag seen
        self.nomoretags = 0        # literal mode until EOF when set
        self.literal = 0           # CDATA/literal mode when set
    # For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        self.nomoretags = self.literal = 1
    # For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        self.literal = 1
    # Interface -- feed some data to the parser.  Call this as
    # often as you want, with as little or as much text as you
    # want (may include '\n').  (This just saves the text, all the
    # processing is done by goahead().)
    def feed(self, data):
        self.rawdata = self.rawdata + data
        self.goahead(0)
    # Interface -- handle the remaining data
    def close(self):
        self.goahead(1)
    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        """Main scanning loop: dispatch on '<' and '&', buffer the rest."""
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if self.nomoretags:
                self.handle_data(rawdata[i:n])
                i = n
                break
            match = interesting.search(rawdata, i)
            if match: j = match.start(0)
            else: j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        # In CDATA mode markup is treated as plain text.
                        self.handle_data(rawdata[i])
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    i = k
                    continue
                if endtagopen.match(rawdata, i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    i = k
                    self.literal = 0
                    continue
                if commentopen.match(rawdata, i):
                    if self.literal:
                        self.handle_data(rawdata[i])
                        i = i+1
                        continue
                    k = self.parse_comment(i)
                    if k < 0: break
                    i = i+k
                    continue
                if piopen.match(rawdata, i):
                    if self.literal:
                        self.handle_data(rawdata[i])
                        i = i+1
                        continue
                    k = self.parse_pi(i)
                    if k < 0: break
                    i = i+k
                    continue
                match = special.match(rawdata, i)
                if match:
                    if self.literal:
                        self.handle_data(rawdata[i])
                        i = i+1
                        continue
                    # Declarations like <!DOCTYPE ...> are passed through
                    # as data in this version.
                    start = i
                    i = match.end(0)
                    self.handle_data(rawdata[start:i])
                    continue
            elif rawdata[i] == '&':
                match = charref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_charref(name)
                    i = match.end(0)
                    # Back up one if the reference was not ';'-terminated.
                    if rawdata[i-1] != ';': i = i-1
                    continue
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    i = match.end(0)
                    if rawdata[i-1] != ';': i = i-1
                    continue
            else:
                raise RuntimeError, 'neither < nor & ??'
            # We get here only if incomplete matches but
            # nothing else
            match = incomplete.match(rawdata, i)
            if not match:
                self.handle_data(rawdata[i])
                i = i+1
                continue
            j = match.end(0)
            if j == n:
                break # Really incomplete
            self.handle_data(rawdata[i:j])
            i = j
        # end while
        if end and i < n:
            self.handle_data(rawdata[i:n])
            i = n
        self.rawdata = rawdata[i:]
        # XXX if end: check for empty stack
    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+4] <> '<!--':
            raise RuntimeError, 'unexpected call to handle_comment'
        match = commentclose.search(rawdata, i+4)
        if not match:
            return -1
        j = match.start(0)
        self.handle_comment(rawdata[i+4: j])
        j = match.end(0)
        return j-i
    # Internal -- parse processing instr, return length or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+2] <> '<?':
            raise RuntimeError, 'unexpected call to handle_pi'
        match = piclose.search(rawdata, i+2)
        if not match:
            return -1
        j = match.start(0)
        self.handle_pi(rawdata[i+2: j])
        j = match.end(0)
        return j-i

    # Raw text of the most recent start tag (for get_starttag_text()).
    __starttag_text = None
    def get_starttag_text(self):
        return self.__starttag_text
    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at position i; return the index just past it."""
        self.__starttag_text = None
        start_pos = i
        rawdata = self.rawdata
        if shorttagopen.match(rawdata, i):
            # SGML shorthand: <tag/data/ == <tag>data</tag>
            # XXX Can data contain &... (entity or char refs)?
            # XXX Can data contain < or > (tag characters)?
            # XXX Can there be whitespace before the first /?
            match = shorttag.match(rawdata, i)
            if not match:
                return -1
            tag, data = match.group(1, 2)
            self.__starttag_text = '<%s/' % tag
            tag = string.lower(tag)
            k = match.end(0)
            self.finish_shorttag(tag, data)
            self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
            return k
        # XXX The following should skip matching quotes (' or ")
        match = endbracket.search(rawdata, i+1)
        if not match:
            return -1
        j = match.start(0)
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        if rawdata[i:i+2] == '<>':
            # SGML shorthand: <> == <last open tag seen>
            # (this tweaked version emits '<>' as literal data instead)
            k = j
            self.handle_data('<>')
            return k + 1
        else:
            match = tagfind.match(rawdata, i+1)
            if not match:
                raise RuntimeError, 'unexpected call to parse_starttag'
            k = match.end(0)
            tag = string.lower(rawdata[i+1:k])
            self.lasttag = tag
        while k < j:
            match = attrfind.match(rawdata, k)
            if not match: break
            attrname, rest, attrvalue = match.group(1, 2, 3)
            qchar = '\0'   # '\0' marks an unquoted attribute value
            if not rest:
                # Attribute without '=value': value defaults to its name.
                attrvalue = attrname
            elif attrvalue[:1] == '\'' or attrvalue[:1] == '"':
                qchar = attrvalue[:1]
                attrvalue = attrvalue[1:-1]
            # NOTE: 3-tuple (name, value, qchar) -- an ekhtml extension
            # over the stock sgmllib 2-tuple.
            attrs.append((string.lower(attrname), attrvalue, qchar))
            k = match.end(0)
        if rawdata[j] == '>':
            j = j+1
        self.__starttag_text = rawdata[start_pos:j]
        self.finish_starttag(tag, attrs)
        return j
    # Internal -- parse endtag
    def parse_endtag(self, i):
        rawdata = self.rawdata
        match = endbracket.search(rawdata, i+1)
        if not match:
            return -1
        j = match.start(0)
        tag = string.lower(string.strip(rawdata[i+2:j]))
        if rawdata[j] == '>':
            j = j+1
        self.finish_endtag(tag)
        return j
    # Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
    def finish_shorttag(self, tag, data):
        self.finish_starttag(tag, [])
        self.handle_data(data)
        self.finish_endtag(tag)
    # Internal -- finish processing of start tag
    # Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
    def finish_starttag(self, tag, attrs):
        try:
            method = getattr(self, 'start_' + tag)
        except AttributeError:
            try:
                method = getattr(self, 'do_' + tag)
            except AttributeError:
                self.unknown_starttag(tag, attrs)
                return -1
            else:
                self.handle_starttag(tag, method, attrs)
                return 0
        else:
            # start_<tag> implies the tag must later be closed: track it.
            self.stack.append(tag)
            self.handle_starttag(tag, method, attrs)
            return 1
    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        """Close <tag>; an empty tag ('</>') closes the innermost element."""
        if not tag:
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            if tag not in self.stack:
                try:
                    method = getattr(self, 'end_' + tag)
                except AttributeError:
                    self.unknown_endtag(tag)
                else:
                    self.report_unbalanced(tag)
                return
            found = len(self.stack)
            for i in range(found):
                if self.stack[i] == tag: found = i
        # Pop and close everything above the matched tag as well.
        while len(self.stack) > found:
            tag = self.stack[-1]
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                method = None
            if method:
                self.handle_endtag(tag, method)
            else:
                self.unknown_endtag(tag)
            del self.stack[-1]
    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        method(attrs)
    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        method()
    # Example -- report an unbalanced </...> tag.
    def report_unbalanced(self, tag):
        if self.verbose:
            print '*** Unbalanced </' + tag + '>'
            print '*** Stack:', self.stack
    # Example -- handle character reference, no need to override
    def handle_charref(self, name):
        try:
            n = string.atoi(name)
        except string.atoi_error:
            self.unknown_charref(name)
            return
        if not 0 <= n <= 255:
            # Only Latin-1 code points are representable via chr() here.
            self.unknown_charref(name)
            return
        self.handle_data(chr(n))
    # Definition of entities -- derived classes may override
    entitydefs = \
            {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
    # Example -- handle entity reference, no need to override
    def handle_entityref(self, name):
        table = self.entitydefs
        if table.has_key(name):
            self.handle_data(table[name])
        else:
            self.unknown_entityref(name)
            return
    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass
    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass
    # Example -- handle processing instruction, could be overridden
    def handle_pi(self, data):
        pass
    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(`self.testdata`) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', `data`
def handle_comment(self, data):
self.flush()
r = `data`
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if not args:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| {
"repo_name": "hilbix/ekhtml",
"path": "testsuite/ek_sgmllib.py",
"copies": "1",
"size": "16039",
"license": "bsd-2-clause",
"hash": -1858468973763169800,
"line_mean": 31.66598778,
"line_max": 81,
"alpha_frac": 0.5004052622,
"autogenerated": false,
"ratio": 3.9997506234413964,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9989176762711498,
"avg_score": 0.002195824585979532,
"num_lines": 491
} |
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
# Finds the next character that could start markup ('&' or '<').
interesting = re.compile('[&<]')
# Matches a prefix of markup that may still be incomplete at the end
# of the buffered data (partial entity/char reference or partial tag).
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
                        '<([a-zA-Z][^<>]*|'
                        '/([a-zA-Z][^<>]*)?|'
                        '![^<>]*)?')
# Entity/char references: terminated by any non-name character
# (a missing ';' is tolerated; goahead() backs up one character).
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
# SGML shorthand start: '<tag/' introduces '<tag/data/' == '<tag>data</tag>'.
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
# Tag name and attribute (name, optional '=value') scanners.
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
    r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
    """Raised (via SGMLParser.error()) for all parse errors."""
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(markupbase.ParserBase):
    """SGML parser base class (later stdlib sgmllib version).

    Derived classes act as a static DTD: define start_foo/end_foo (or
    do_foo) methods to handle <foo>...</foo>; tag names are lower-cased.
    Text goes to handle_data(), references to handle_entityref()/
    handle_charref(), declarations to markupbase.ParserBase machinery.
    Attributes are collected as 2-tuples (name, value).
    """

    # Definition of entities -- derived classes may override
    # Matches either a named entity (group 1) or a numeric charref
    # (group 2), with an optional trailing ';' captured in group 3.
    entity_or_charref = re.compile('&(?:'
        '([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
        ')(;?)')

    def __init__(self, verbose=0):
        """Initialize and reset this instance."""
        self.verbose = verbose
        self.reset()

    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.__starttag_text = None   # raw text of the last start tag
        self.rawdata = ''             # unprocessed input buffered between feeds
        self.stack = []               # currently open (balanced) tags
        self.lasttag = '???'          # most recent start tag seen
        self.nomoretags = 0           # literal mode until EOF when set
        self.literal = 0              # CDATA/literal mode when set
        markupbase.ParserBase.reset(self)

    def setnomoretags(self):
        """Enter literal mode (CDATA) till EOF.

        Intended for derived classes only.
        """
        self.nomoretags = self.literal = 1

    def setliteral(self, *args):
        """Enter literal mode (CDATA).

        Intended for derived classes only.
        """
        self.literal = 1

    def feed(self, data):
        """Feed some data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').  (This just saves the text,
        all the processing is done by goahead().)
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle the remaining data."""
        self.goahead(1)

    def error(self, message):
        # Central error hook: every parse failure funnels through here.
        raise SGMLParseError(message)

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        """Main scanning loop: dispatch on '<' and '&', buffer the rest."""
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if self.nomoretags:
                self.handle_data(rawdata[i:n])
                i = n
                break
            match = interesting.search(rawdata, i)
            if match: j = match.start()
            else: j = n
            if i < j:
                self.handle_data(rawdata[i:j])
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        # In CDATA mode markup is treated as plain text.
                        self.handle_data(rawdata[i])
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    i = k
                    continue
                if rawdata.startswith("</", i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    i = k
                    self.literal = 0
                    continue
                if self.literal:
                    if n > (i + 1):
                        self.handle_data("<")
                        i = i+1
                    else:
                        # incomplete
                        break
                    continue
                if rawdata.startswith("<!--", i):
                    # Strictly speaking, a comment is --.*--
                    # within a declaration tag <!...>.
                    # This should be removed,
                    # and comments handled only in parse_declaration.
                    k = self.parse_comment(i)
                    if k < 0: break
                    i = k
                    continue
                if rawdata.startswith("<?", i):
                    k = self.parse_pi(i)
                    if k < 0: break
                    i = i+k
                    continue
                if rawdata.startswith("<!", i):
                    # This is some sort of declaration; in "HTML as
                    # deployed," this should only be the document type
                    # declaration ("<!DOCTYPE html...>").
                    k = self.parse_declaration(i)
                    if k < 0: break
                    i = k
                    continue
            elif rawdata[i] == '&':
                if self.literal:
                    self.handle_data(rawdata[i])
                    i = i+1
                    continue
                match = charref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_charref(name)
                    i = match.end(0)
                    # Back up one if the reference was not ';'-terminated.
                    if rawdata[i-1] != ';': i = i-1
                    continue
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    i = match.end(0)
                    if rawdata[i-1] != ';': i = i-1
                    continue
            else:
                self.error('neither < nor & ??')
            # We get here only if incomplete matches but
            # nothing else
            match = incomplete.match(rawdata, i)
            if not match:
                self.handle_data(rawdata[i])
                i = i+1
                continue
            j = match.end(0)
            if j == n:
                break # Really incomplete
            self.handle_data(rawdata[i:j])
            i = j
        # end while
        if end and i < n:
            self.handle_data(rawdata[i:n])
            i = n
        self.rawdata = rawdata[i:]
        # XXX if end: check for empty stack

    # Extensions for the DOCTYPE scanner:
    _decl_otherchars = '='

    # Internal -- parse processing instr, return length or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+2] != '<?':
            self.error('unexpected call to parse_pi()')
        match = piclose.search(rawdata, i+2)
        if not match:
            return -1
        j = match.start(0)
        self.handle_pi(rawdata[i+2: j])
        j = match.end(0)
        return j-i

    def get_starttag_text(self):
        # Raw text of the most recently parsed start tag (or None).
        return self.__starttag_text

    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at position i; return the index just past it."""
        self.__starttag_text = None
        start_pos = i
        rawdata = self.rawdata
        if shorttagopen.match(rawdata, i):
            # SGML shorthand: <tag/data/ == <tag>data</tag>
            # XXX Can data contain &... (entity or char refs)?
            # XXX Can data contain < or > (tag characters)?
            # XXX Can there be whitespace before the first /?
            match = shorttag.match(rawdata, i)
            if not match:
                return -1
            tag, data = match.group(1, 2)
            self.__starttag_text = '<%s/' % tag
            tag = tag.lower()
            k = match.end(0)
            self.finish_shorttag(tag, data)
            self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
            return k
        # XXX The following should skip matching quotes (' or ")
        # As a shortcut way to exit, this isn't so bad, but shouldn't
        # be used to locate the actual end of the start tag since the
        # < or > characters may be embedded in an attribute value.
        match = endbracket.search(rawdata, i+1)
        if not match:
            return -1
        j = match.start(0)
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        if rawdata[i:i+2] == '<>':
            # SGML shorthand: <> == <last open tag seen>
            k = j
            tag = self.lasttag
        else:
            match = tagfind.match(rawdata, i+1)
            if not match:
                self.error('unexpected call to parse_starttag')
            k = match.end(0)
            tag = rawdata[i+1:k].lower()
            self.lasttag = tag
        while k < j:
            match = attrfind.match(rawdata, k)
            if not match: break
            attrname, rest, attrvalue = match.group(1, 2, 3)
            if not rest:
                # Attribute without '=value': value defaults to its name.
                attrvalue = attrname
            else:
                if (attrvalue[:1] == "'" == attrvalue[-1:] or
                    attrvalue[:1] == '"' == attrvalue[-1:]):
                    # strip quotes
                    attrvalue = attrvalue[1:-1]
                # Resolve entity/char references inside the value.
                attrvalue = self.entity_or_charref.sub(
                    self._convert_ref, attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = match.end(0)
        if rawdata[j] == '>':
            j = j+1
        self.__starttag_text = rawdata[start_pos:j]
        self.finish_starttag(tag, attrs)
        return j

    # Internal -- convert entity or character reference
    def _convert_ref(self, match):
        if match.group(2):
            # Numeric charref; fall back to the literal text if it
            # cannot be converted.
            return self.convert_charref(match.group(2)) or \
                '&#%s%s' % match.groups()[1:]
        elif match.group(3):
            return self.convert_entityref(match.group(1)) or \
                '&%s;' % match.group(1)
        else:
            return '&%s' % match.group(1)

    # Internal -- parse endtag
    def parse_endtag(self, i):
        rawdata = self.rawdata
        match = endbracket.search(rawdata, i+1)
        if not match:
            return -1
        j = match.start(0)
        tag = rawdata[i+2:j].strip().lower()
        if rawdata[j] == '>':
            j = j+1
        self.finish_endtag(tag)
        return j

    # Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
    def finish_shorttag(self, tag, data):
        self.finish_starttag(tag, [])
        self.handle_data(data)
        self.finish_endtag(tag)

    # Internal -- finish processing of start tag
    # Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
    def finish_starttag(self, tag, attrs):
        try:
            method = getattr(self, 'start_' + tag)
        except AttributeError:
            try:
                method = getattr(self, 'do_' + tag)
            except AttributeError:
                self.unknown_starttag(tag, attrs)
                return -1
            else:
                self.handle_starttag(tag, method, attrs)
                return 0
        else:
            # start_<tag> implies the tag must later be closed: track it.
            self.stack.append(tag)
            self.handle_starttag(tag, method, attrs)
            return 1

    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        """Close <tag>; an empty tag ('</>') closes the innermost element."""
        if not tag:
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            if tag not in self.stack:
                try:
                    method = getattr(self, 'end_' + tag)
                except AttributeError:
                    self.unknown_endtag(tag)
                else:
                    self.report_unbalanced(tag)
                return
            found = len(self.stack)
            for i in range(found):
                if self.stack[i] == tag: found = i
        # Pop and close everything above the matched tag as well.
        while len(self.stack) > found:
            tag = self.stack[-1]
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                method = None
            if method:
                self.handle_endtag(tag, method)
            else:
                self.unknown_endtag(tag)
            del self.stack[-1]

    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        method(attrs)

    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        method()

    # Example -- report an unbalanced </...> tag.
    def report_unbalanced(self, tag):
        if self.verbose:
            print '*** Unbalanced </' + tag + '>'
            print '*** Stack:', self.stack

    def convert_charref(self, name):
        """Convert character reference, may be overridden."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 255:
            # Only Latin-1 code points are convertible here.
            return
        return self.convert_codepoint(n)

    def convert_codepoint(self, codepoint):
        return chr(codepoint)

    def handle_charref(self, name):
        """Handle character reference, no need to override."""
        replacement = self.convert_charref(name)
        if replacement is None:
            self.unknown_charref(name)
        else:
            self.handle_data(replacement)

    # Definition of entities -- derived classes may override
    entitydefs = \
            {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}

    def convert_entityref(self, name):
        """Convert entity references.

        As an alternative to overriding this method; one can tailor the
        results by setting up the self.entitydefs mapping appropriately.
        """
        table = self.entitydefs
        if name in table:
            return table[name]
        else:
            return

    def handle_entityref(self, name):
        """Handle entity references, no need to override."""
        replacement = self.convert_entityref(name)
        if replacement is None:
            self.unknown_entityref(name)
        else:
            # NOTE(review): convert_entityref() is called a second time
            # here instead of reusing 'replacement'; harmless for the
            # default dict lookup, but redundant (and surprising for
            # subclasses with side-effecting overrides).
            self.handle_data(self.convert_entityref(name))

    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass

    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass

    # Example -- handle declaration, could be overridden
    def handle_decl(self, decl):
        pass

    # Example -- handle processing instruction, could be overridden
    def handle_pi(self, data):
        pass

    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
    """Demo subclass that prints every parsing event it receives.

    Data is accumulated in self.testdata and flushed (printed) once its
    repr grows past ~70 characters, or whenever a non-data event occurs.
    """

    def __init__(self, verbose=0):
        self.testdata = ""
        SGMLParser.__init__(self, verbose)

    def handle_data(self, data):
        self.testdata = self.testdata + data
        if len(repr(self.testdata)) >= 70:
            self.flush()

    def flush(self):
        # Print and clear any buffered character data.
        data = self.testdata
        if data:
            self.testdata = ""
            print 'data:', repr(data)

    def handle_comment(self, data):
        self.flush()
        r = repr(data)
        if len(r) > 68:
            # Abbreviate long comments: keep head and tail only.
            r = r[:32] + '...' + r[-32:]
        print 'comment:', r

    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print 'start tag: <' + tag + '>'
        else:
            print 'start tag: <' + tag,
            for name, value in attrs:
                print name + '=' + '"' + value + '"',
            print '>'

    def unknown_endtag(self, tag):
        self.flush()
        print 'end tag: </' + tag + '>'

    def unknown_entityref(self, ref):
        self.flush()
        print '*** unknown entity ref: &' + ref + ';'

    def unknown_charref(self, ref):
        self.flush()
        print '*** unknown char ref: &#' + ref + ';'

    def unknown_decl(self, data):
        self.flush()
        print '*** unknown decl: [' + data + ']'

    def close(self):
        SGMLParser.close(self)
        self.flush()
def test(args = None):
    """Command-line driver: parse a file (or stdin) one character at a time.

    args defaults to sys.argv[1:]; '-s' selects the silent base parser,
    otherwise the printing TestSGMLParser is used.  '-' reads stdin.
    """
    import sys
    if args is None:
        args = sys.argv[1:]
    if args and args[0] == '-s':
        args = args[1:]
        klass = SGMLParser
    else:
        klass = TestSGMLParser
    if args:
        file = args[0]
    else:
        file = 'test.html'
    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError, msg:
            print file, ":", msg
            sys.exit(1)
    data = f.read()
    if f is not sys.stdin:
        f.close()
    x = klass()
    # Feed one character at a time to exercise incremental parsing.
    for c in data:
        x.feed(c)
    x.close()
if __name__ == '__main__':
test()
| {
"repo_name": "ericlink/adms-server",
"path": "playframework-dist/play-1.1/python/Lib/sgmllib.py",
"copies": "2",
"size": "18320",
"license": "mit",
"hash": -8492274654981511000,
"line_mean": 31.4306569343,
"line_max": 79,
"alpha_frac": 0.4865174672,
"autogenerated": false,
"ratio": 4.208591775786814,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011785908078781175,
"num_lines": 548
} |
"""A parser for the Music Metadata XML Format (MMD).
This module contains L{MbXmlParser}, which parses the U{Music Metadata XML
Format (MMD) <http://musicbrainz.org/development/mmd/>} returned by the
MusicBrainz webservice.
There are also DOM helper functions in this module used by the parser which
probably aren't useful to users.
"""
__revision__ = '$Id$'
import re
import logging
import urlparse
import xml.dom.minidom
import xml.sax.saxutils as saxutils
from xml.parsers.expat import ExpatError
from xml.dom import DOMException
import musicbrainz2.utils as mbutils
import musicbrainz2.model as model
from musicbrainz2.model import NS_MMD_1, NS_REL_1, NS_EXT_1
__all__ = [
'DefaultFactory', 'Metadata', 'ParseError',
'MbXmlParser', 'MbXmlWriter',
'AbstractResult',
'ArtistResult', 'ReleaseResult', 'TrackResult', 'LabelResult',
'ReleaseGroupResult'
]
class DefaultFactory(object):
    """Default factory for objects from the L{musicbrainz2.model} module.

    The parser uses one of these methods whenever it needs a new model
    object; subclass and override individual methods to substitute
    custom classes.
    """

    def newArtist(self):
        return model.Artist()

    def newRelease(self):
        return model.Release()

    def newReleaseGroup(self):
        return model.ReleaseGroup()

    def newTrack(self):
        return model.Track()

    def newRelation(self):
        return model.Relation()

    def newReleaseEvent(self):
        return model.ReleaseEvent()

    def newDisc(self):
        return model.Disc()

    def newArtistAlias(self):
        return model.ArtistAlias()

    def newUser(self):
        return model.User()

    def newLabel(self):
        return model.Label()

    def newLabelAlias(self):
        return model.LabelAlias()

    def newTag(self):
        return model.Tag()

    def newRating(self):
        return model.Rating()
class ParseError(Exception):
    """Raised when an MMD document cannot be parsed.

    The attribute C{'msg'} holds a printable error message, and
    C{'reason'} stores the lower-level exception that caused this
    error (or None).
    """

    def __init__(self, msg='Parse Error', reason=None):
        super(ParseError, self).__init__()
        self.reason = reason
        self.msg = msg

    def __str__(self):
        return self.msg
class Metadata(object):
"""Represents a parsed Music Metadata XML document.
The Music Metadata XML format is very flexible and may contain a
diverse set of data (e.g. an artist, a release and a list of tracks),
but usually only a small subset is used (either an artist, a release
or a track, or a lists of objects from one class).
@see: L{MbXmlParser} for reading, and L{MbXmlWriter} for writing
Metadata objects
"""
def __init__(self):
self._artist = None
self._release = None
self._track = None
self._label = None
self._releaseGroup = None
self._artistResults = [ ]
self._artistResultsOffset = None
self._artistResultsCount = None
self._releaseResults = [ ]
self._releaseResultsOffset = None
self._releaseResultsCount = None
self._releaseGroupResults = [ ]
self._releaseGroupResultsOffset = None
self._releaseGroupResultsCount = None
self._trackResults = [ ]
self._trackResultsOffset = None
self._trackResultsCount = None
self._labelResults = [ ]
self._labelResultsOffset = None
self._labelResultsCount = None
self._tagList = [ ]
self._rating = None
self._userList = [ ]
def getArtist(self):
return self._artist
def setArtist(self, artist):
self._artist = artist
artist = property(getArtist, setArtist, doc='An Artist object.')
def getLabel(self):
return self._label
def setLabel(self, label):
self._label = label
label = property(getLabel, setLabel, doc='A Label object.')
def getRelease(self):
return self._release
def setRelease(self, release):
self._release = release
release = property(getRelease, setRelease, doc='A Release object.')
def getReleaseGroup(self):
return self._releaseGroup
def setReleaseGroup(self, releaseGroup):
self._releaseGroup = releaseGroup
releaseGroup = property(getReleaseGroup, setReleaseGroup)
def getTrack(self):
return self._track
def setTrack(self, track):
self._track = track
track = property(getTrack, setTrack, doc='A Track object.')
def getArtistResults(self):
"""Returns an artist result list.
@return: a list of L{ArtistResult} objects.
"""
return self._artistResults
artistResults = property(getArtistResults,
doc='A list of ArtistResult objects.')
def getArtistResultsOffset(self):
"""Returns the offset of the artist result list.
The offset is used for paging through the result list. It
is zero-based.
@return: an integer containing the offset, or None
@see: L{getArtistResults}, L{getArtistResultsCount}
"""
return self._artistResultsOffset
def setArtistResultsOffset(self, value):
"""Sets the offset of the artist result list.
@param value: an integer containing the offset, or None
@see: L{getArtistResultsOffset}
"""
self._artistResultsOffset = value
artistResultsOffset = property(
getArtistResultsOffset, setArtistResultsOffset,
doc='The offset of the artist results.')
def getArtistResultsCount(self):
"""Returns the total number of results available.
This may or may not match with the number of elements that
L{getArtistResults} returns. If the count is higher than
the list, it indicates that the list is incomplete.
@return: an integer containing the count, or None
@see: L{setArtistResultsCount}, L{getArtistResultsOffset}
"""
return self._artistResultsCount
def setArtistResultsCount(self, value):
"""Sets the total number of available results.
@param value: an integer containing the count, or None
@see: L{getArtistResults}, L{setArtistResultsOffset}
"""
self._artistResultsCount = value
artistResultsCount = property(
getArtistResultsCount, setArtistResultsCount,
doc='The total number of artists results.')
def getLabelResults(self):
"""Returns a label result list.
@return: a list of L{LabelResult} objects.
"""
return self._labelResults
labelResults = property(getLabelResults,
doc='A list of LabelResult objects')
def getLabelResultsOffset(self):
"""Returns the offset of the label result list.
The offset is used for paging through the result list. It
is zero-based.
@return: an integer containing the offset, or None
@see: L{getLabelResults}, L{getLabelResultsCount}
"""
return self._labelResultsOffset
def setLabelResultsOffset(self, value):
"""Sets the offset of the label result list.
@param value: an integer containing the offset, or None
@see: L{getLabelResultsOffset}
"""
self._labelResultsOffset = value
labelResultsOffset = property(
getLabelResultsOffset, setLabelResultsOffset,
doc='The offset of the label results.')
def getLabelResultsCount(self):
"""Returns the total number of results available.
This may or may not match with the number of elements that
L{getLabelResults} returns. If the count is higher than
the list, it indicates that the list is incomplete.
@return: an integer containing the count, or None
@see: L{setLabelResultsCount}, L{getLabelResultsOffset}
"""
return self._labelResultsCount
def setLabelResultsCount(self, value):
"""Sets the total number of available results.
@param value: an integer containing the count, or None
@see: L{getLabelResults}, L{setLabelResultsOffset}
"""
self._labelResultsCount = value
labelResultsCount = property(
getLabelResultsCount, setLabelResultsCount,
doc='The total number of label results.')
def getReleaseResults(self):
"""Returns a release result list.
@return: a list of L{ReleaseResult} objects.
"""
return self._releaseResults
releaseResults = property(getReleaseResults,
doc='A list of ReleaseResult objects.')
def getReleaseResultsOffset(self):
"""Returns the offset of the release result list.
The offset is used for paging through the result list. It
is zero-based.
@return: an integer containing the offset, or None
@see: L{getReleaseResults}, L{getReleaseResultsCount}
"""
return self._releaseResultsOffset
def setReleaseResultsOffset(self, value):
"""Sets the offset of the release result list.
@param value: an integer containing the offset, or None
@see: L{getReleaseResultsOffset}
"""
self._releaseResultsOffset = value
releaseResultsOffset = property(
getReleaseResultsOffset, setReleaseResultsOffset,
doc='The offset of the release results.')
def getReleaseResultsCount(self):
    """Return the total number of release results available.

    This may exceed the number of elements L{getReleaseResults}
    returns; in that case the local list is only one page of the
    full server-side result set.

    @return: an integer containing the count, or None
    @see: L{setReleaseResultsCount}, L{getReleaseResultsOffset}
    """
    return self._releaseResultsCount

def setReleaseResultsCount(self, value):
    """Store the total number of release results available.

    @param value: an integer containing the count, or None
    @see: L{getReleaseResults}, L{setReleaseResultsOffset}
    """
    self._releaseResultsCount = value

releaseResultsCount = property(
    getReleaseResultsCount, setReleaseResultsCount,
    doc='The total number of release results.')
def getReleaseGroupResults(self):
    """Return the list of release group search results.

    @return: a list of L{ReleaseGroupResult} objects.
    """
    return self._releaseGroupResults

releaseGroupResults = property(getReleaseGroupResults,
    doc='A list of ReleaseGroupResult objects.')
def getReleaseGroupResultsOffset(self):
    """Return the offset into the release group result list.

    Offsets are zero-based and are used to page through large
    result sets.

    @return: an integer containing the offset, or None.
    @see: L{getReleaseGroupResults}, L{getReleaseGroupResultsCount}
    """
    return self._releaseGroupResultsOffset

def setReleaseGroupResultsOffset(self, value):
    """Set the offset into the release group result list.

    @param value: an integer containing the offset, or None
    @see: L{getReleaseGroupResultsOffset}
    """
    self._releaseGroupResultsOffset = value

releaseGroupResultsOffset = property(
    getReleaseGroupResultsOffset, setReleaseGroupResultsOffset,
    doc='The offset of the release group results.')
def getReleaseGroupResultsCount(self):
    """Return the total number of release group results available.

    This may exceed the number of elements L{getReleaseGroupResults}
    returns; in that case the local list is only one page of the
    full server-side result set.

    @return: an integer containing the count, or None
    @see: L{setReleaseGroupResultsCount}, L{getReleaseGroupResultsOffset}
    """
    return self._releaseGroupResultsCount

def setReleaseGroupResultsCount(self, value):
    """Store the total number of release group results available.

    @param value: an integer containing the count, or None
    @see: L{getReleaseGroupResults}, L{setReleaseGroupResultsOffset}
    """
    self._releaseGroupResultsCount = value

releaseGroupResultsCount = property(
    getReleaseGroupResultsCount, setReleaseGroupResultsCount,
    doc='The total number of release group results.')
def getTrackResults(self):
    """Return the list of track search results.

    @return: a list of L{TrackResult} objects.
    """
    return self._trackResults

trackResults = property(getTrackResults,
    doc='A list of TrackResult objects.')
def getTrackResultsOffset(self):
    """Return the offset into the track result list.

    Offsets are zero-based and are used to page through large
    result sets.

    @return: an integer containing the offset, or None
    @see: L{getTrackResults}, L{getTrackResultsCount}
    """
    return self._trackResultsOffset

def setTrackResultsOffset(self, value):
    """Set the offset into the track result list.

    @param value: an integer containing the offset, or None
    @see: L{getTrackResultsOffset}
    """
    self._trackResultsOffset = value

trackResultsOffset = property(
    getTrackResultsOffset, setTrackResultsOffset,
    doc='The offset of the track results.')
def getTrackResultsCount(self):
    """Return the total number of track results available.

    This may exceed the number of elements L{getTrackResults}
    returns; in that case the local list is only one page of the
    full server-side result set.

    @return: an integer containing the count, or None
    @see: L{setTrackResultsCount}, L{getTrackResultsOffset}
    """
    return self._trackResultsCount

def setTrackResultsCount(self, value):
    """Store the total number of track results available.

    @param value: an integer containing the count, or None
    @see: L{getTrackResults}, L{setTrackResultsOffset}
    """
    self._trackResultsCount = value

trackResultsCount = property(
    getTrackResultsCount, setTrackResultsCount,
    doc='The total number of track results.')
def getTagList(self):
    """Return the list of tags.

    @return: a list of L{model.Tag} objects
    """
    return self._tagList

# NOTE(review): the property is named 'tagResults' while the getter is
# 'getTagList' -- kept as-is because external callers may rely on it.
tagResults = property(getTagList,
    doc='A list of Tag objects.')
def getRating(self):
    """Return the rating.

    @return: rating object
    """
    return self._rating

def setRating(self, value):
    """Set the rating.

    @param value: a L{model.Rating} object
    """
    self._rating = value

rating = property(getRating, setRating, doc='A Rating object.')
# MusicBrainz extension to the schema
def getUserList(self):
    """Return the list of users.

    @return: a list of L{model.User} objects
    @note: This is a MusicBrainz extension.
    """
    return self._userList

userResults = property(getUserList,
    doc='A list of User objects.')
class AbstractResult(object):
    """Base class for all search results.

    A result pairs an entity of some kind (Artist, Release, ...)
    with a relevance score.
    """

    def __init__(self, score):
        self._score = score

    def getScore(self):
        """Return the relevance score of this result.

        Higher values indicate a better match between the result and
        the search parameters.

        @return: an int between 0 and 100 (both inclusive), or None
        """
        return self._score

    def setScore(self, score):
        self._score = score

    score = property(getScore, setScore, doc='The relevance score.')
class ArtistResult(AbstractResult):
    """A search result holding an artist.

    Combines an artist with a relevance I{score} between 0 and 100;
    larger scores indicate closer matches.
    """

    def __init__(self, artist, score):
        super(ArtistResult, self).__init__(score)
        self._artist = artist

    def getArtist(self):
        """Return the matched artist.

        @return: a L{musicbrainz2.model.Artist} object
        """
        return self._artist

    def setArtist(self, artist):
        self._artist = artist

    artist = property(getArtist, setArtist, doc='An Artist object.')
class ReleaseResult(AbstractResult):
    """A search result holding a release.

    Combines a release with a relevance I{score} between 0 and 100;
    larger scores indicate closer matches.
    """

    def __init__(self, release, score):
        super(ReleaseResult, self).__init__(score)
        self._release = release

    def getRelease(self):
        """Return the matched release.

        @return: a L{musicbrainz2.model.Release} object
        """
        return self._release

    def setRelease(self, release):
        self._release = release

    release = property(getRelease, setRelease, doc='A Release object.')
class ReleaseGroupResult(AbstractResult):
    """A search result holding a release group.

    Combines a release group with a relevance I{score} between 0 and
    100; larger scores indicate closer matches.
    """

    def __init__(self, releaseGroup, score):
        super(ReleaseGroupResult, self).__init__(score)
        self._releaseGroup = releaseGroup

    def getReleaseGroup(self):
        """Return the matched release group.

        @return: a L{musicbrainz2.model.ReleaseGroup} object
        """
        return self._releaseGroup

    def setReleaseGroup(self, value):
        self._releaseGroup = value

    releaseGroup = property(getReleaseGroup, setReleaseGroup,
        doc='A ReleaseGroup object.')
class TrackResult(AbstractResult):
    """A search result holding a track.

    Combines a track with a relevance I{score} between 0 and 100;
    larger scores indicate closer matches.
    """

    def __init__(self, track, score):
        super(TrackResult, self).__init__(score)
        self._track = track

    def getTrack(self):
        """Return the matched track.

        @return: a L{musicbrainz2.model.Track} object
        """
        return self._track

    def setTrack(self, track):
        self._track = track

    track = property(getTrack, setTrack, doc='A Track object.')
class LabelResult(AbstractResult):
    """A search result holding a label.

    Combines a label with a relevance I{score} between 0 and 100;
    larger scores indicate closer matches.
    """

    def __init__(self, label, score):
        super(LabelResult, self).__init__(score)
        self._label = label

    def getLabel(self):
        """Return the matched label.

        @return: a L{musicbrainz2.model.Label} object
        """
        return self._label

    def setLabel(self, label):
        self._label = label

    label = property(getLabel, setLabel, doc='A Label object.')
class MbXmlParser(object):
    """A parser for the Music Metadata XML format.

    This parser supports all basic features and extensions defined by
    MusicBrainz, including unlimited document nesting. By default it
    reads an XML document from a file-like object (stream) and returns
    an object tree representing the document using classes from
    L{musicbrainz2.model}.

    The implementation tries to be as permissive as possible. Invalid
    contents are skipped, but documents have to be well-formed and using
    the correct namespace. In case of unrecoverable errors, a L{ParseError}
    exception is raised.

    @see: U{The Music Metadata XML Format
        <http://musicbrainz.org/development/mmd/>}
    """
    # NOTE: this file uses Python 2 'except X, e' syntax; it will not
    # compile unchanged on Python 3.

    def __init__(self, factory=DefaultFactory()):
        """Constructor.

        The C{factory} parameter has be an instance of L{DefaultFactory}
        or a subclass of it. It is used by L{parse} to obtain objects
        from L{musicbrainz2.model} to build resulting object tree.
        If you supply your own factory, you have to make sure all
        returned objects have the same interface as their counterparts
        from L{musicbrainz2.model}.

        @param factory: an object factory
        """
        # NOTE(review): the default factory is created once, at function
        # definition time, and is shared by every parser constructed
        # without an explicit factory -- safe only if DefaultFactory is
        # stateless; confirm before changing.
        self._log = logging.getLogger(str(self.__class__))
        self._factory = factory

    def parse(self, inStream):
        """Parses the MusicBrainz web service XML.

        Returns a L{Metadata} object representing the parsed XML or
        raises a L{ParseError} exception if the data was malformed.
        The parser tries to be liberal and skips invalid content if
        possible.

        Note that an L{IOError} may be raised if there is a problem
        reading C{inStream}.

        @param inStream: a file-like object
        @return: a L{Metadata} object (never None)
        @raise ParseError: if the document is not valid
        @raise IOError: if reading from the stream failed
        """
        try:
            doc = xml.dom.minidom.parse(inStream)
            # Try to find the root element. If this isn't an mmd
            # XML file or the namespace is wrong, this will fail.
            elems = doc.getElementsByTagNameNS(NS_MMD_1, 'metadata')
            if len(elems) != 0:
                md = self._createMetadata(elems[0])
            else:
                msg = 'cannot find root element mmd:metadata'
                self._log.debug('ParseError: ' + msg)
                raise ParseError(msg)
            doc.unlink()
            return md
        # XML syntax errors and DOM-level failures are both reported to
        # callers as ParseError, with the original exception chained in.
        except ExpatError, e:
            self._log.debug('ExpatError: ' + str(e))
            raise ParseError(msg=str(e), reason=e)
        except DOMException, e:
            self._log.debug('DOMException: ' + str(e))
            raise ParseError(msg=str(e), reason=e)

    def _createMetadata(self, metadata):
        # Dispatch each child of <metadata> to the matching builder;
        # unrecognized children are silently skipped (permissive parse).
        md = Metadata()
        for node in _getChildElements(metadata):
            if _matches(node, 'artist'):
                md.artist = self._createArtist(node)
            elif _matches(node, 'release'):
                md.release = self._createRelease(node)
            elif _matches(node, 'release-group'):
                md.releaseGroup = self._createReleaseGroup(node)
            elif _matches(node, 'track'):
                md.track = self._createTrack(node)
            elif _matches(node, 'label'):
                md.label = self._createLabel(node)
            elif _matches(node, 'artist-list'):
                (offset, count) = self._getListAttrs(node)
                md.artistResultsOffset = offset
                md.artistResultsCount = count
                self._addArtistResults(node, md.getArtistResults())
            elif _matches(node, 'release-list'):
                (offset, count) = self._getListAttrs(node)
                md.releaseResultsOffset = offset
                md.releaseResultsCount = count
                self._addReleaseResults(node, md.getReleaseResults())
            elif _matches(node, 'release-group-list'):
                (offset, count) = self._getListAttrs(node)
                md.releaseGroupResultsOffset = offset
                md.releaseGroupResultsCount = count
                self._addReleaseGroupResults(node, md.getReleaseGroupResults())
            elif _matches(node, 'track-list'):
                (offset, count) = self._getListAttrs(node)
                md.trackResultsOffset = offset
                md.trackResultsCount = count
                self._addTrackResults(node, md.getTrackResults())
            elif _matches(node, 'label-list'):
                (offset, count) = self._getListAttrs(node)
                md.labelResultsOffset = offset
                md.labelResultsCount = count
                self._addLabelResults(node, md.getLabelResults())
            elif _matches(node, 'tag-list'):
                self._addTagsToList(node, md.getTagList())
            elif _matches(node, 'user-list', NS_EXT_1):
                self._addUsersToList(node, md.getUserList())
        return md

    # The _add*Results helpers build scored result objects from a list
    # element; the ext:score attribute is clamped to [0, 100] by
    # _getIntAttr and entities that fail to parse are dropped.
    def _addArtistResults(self, listNode, resultList):
        for c in _getChildElements(listNode):
            artist = self._createArtist(c)
            score = _getIntAttr(c, 'score', 0, 100, ns=NS_EXT_1)
            if artist is not None:
                resultList.append(ArtistResult(artist, score))

    def _addReleaseResults(self, listNode, resultList):
        for c in _getChildElements(listNode):
            release = self._createRelease(c)
            score = _getIntAttr(c, 'score', 0, 100, ns=NS_EXT_1)
            if release is not None:
                resultList.append(ReleaseResult(release, score))

    def _addReleaseGroupResults(self, listNode, resultList):
        for c in _getChildElements(listNode):
            releaseGroup = self._createReleaseGroup(c)
            score = _getIntAttr(c, 'score', 0, 100, ns=NS_EXT_1)
            if releaseGroup is not None:
                resultList.append(ReleaseGroupResult(releaseGroup, score))

    def _addTrackResults(self, listNode, resultList):
        for c in _getChildElements(listNode):
            track = self._createTrack(c)
            score = _getIntAttr(c, 'score', 0, 100, ns=NS_EXT_1)
            if track is not None:
                resultList.append(TrackResult(track, score))

    def _addLabelResults(self, listNode, resultList):
        for c in _getChildElements(listNode):
            label = self._createLabel(c)
            score = _getIntAttr(c, 'score', 0, 100, ns=NS_EXT_1)
            if label is not None:
                resultList.append(LabelResult(label, score))

    # The _add*ToList helpers append plain (un-scored) model objects.
    def _addReleasesToList(self, listNode, resultList):
        self._addToList(listNode, resultList, self._createRelease)

    def _addReleaseGroupsToList(self, listNode, resultList):
        self._addToList(listNode, resultList, self._createReleaseGroup)

    def _addTracksToList(self, listNode, resultList):
        self._addToList(listNode, resultList, self._createTrack)

    def _addUsersToList(self, listNode, resultList):
        self._addToList(listNode, resultList, self._createUser)

    def _addTagsToList(self, listNode, resultList):
        self._addToList(listNode, resultList, self._createTag)

    def _addTagsToEntity(self, listNode, entity):
        # Tags attach directly to the entity rather than to a list.
        for node in _getChildElements(listNode):
            tag = self._createTag(node)
            entity.addTag(tag)

    def _addRatingToEntity(self, attrNode, entity):
        rating = self._createRating(attrNode)
        entity.setRating(rating)

    def _addToList(self, listNode, resultList, creator):
        # Generic helper: apply 'creator' to each child and collect.
        for c in _getChildElements(listNode):
            resultList.append(creator(c))

    def _getListAttrs(self, listNode):
        # Paging attributes common to all *-list elements.
        offset = _getIntAttr(listNode, 'offset')
        count = _getIntAttr(listNode, 'count')
        return (offset, count)

    def _createArtist(self, artistNode):
        # Build an Artist from an <artist> element; unknown children
        # are ignored.
        artist = self._factory.newArtist()
        artist.setId(_getIdAttr(artistNode, 'id', 'artist'))
        artist.setType(_getUriAttr(artistNode, 'type'))
        for node in _getChildElements(artistNode):
            if _matches(node, 'name'):
                artist.setName(_getText(node))
            elif _matches(node, 'sort-name'):
                artist.setSortName(_getText(node))
            elif _matches(node, 'disambiguation'):
                artist.setDisambiguation(_getText(node))
            elif _matches(node, 'life-span'):
                artist.setBeginDate(_getDateAttr(node, 'begin'))
                artist.setEndDate(_getDateAttr(node, 'end'))
            elif _matches(node, 'alias-list'):
                self._addArtistAliases(node, artist)
            elif _matches(node, 'release-list'):
                (offset, count) = self._getListAttrs(node)
                artist.setReleasesOffset(offset)
                artist.setReleasesCount(count)
                self._addReleasesToList(node, artist.getReleases())
            elif _matches(node, 'release-group-list'):
                (offset, count) = self._getListAttrs(node)
                artist.setReleaseGroupsOffset(offset)
                artist.setReleaseGroupsCount(count)
                self._addReleaseGroupsToList(node, artist.getReleaseGroups())
            elif _matches(node, 'relation-list'):
                self._addRelationsToEntity(node, artist)
            elif _matches(node, 'tag-list'):
                self._addTagsToEntity(node, artist)
            elif _matches(node, 'rating'):
                self._addRatingToEntity(node, artist)
        return artist

    def _createLabel(self, labelNode):
        # Build a Label from a <label> element.
        label = self._factory.newLabel()
        label.setId(_getIdAttr(labelNode, 'id', 'label'))
        label.setType(_getUriAttr(labelNode, 'type'))
        for node in _getChildElements(labelNode):
            if _matches(node, 'name'):
                label.setName(_getText(node))
            # NOTE(review): this is 'if', not 'elif', unlike the rest of
            # the chain; behaviorally equivalent here because an element
            # matches at most one name, but inconsistent with the
            # sibling _create* methods.
            if _matches(node, 'sort-name'):
                label.setSortName(_getText(node))
            elif _matches(node, 'disambiguation'):
                label.setDisambiguation(_getText(node))
            elif _matches(node, 'label-code'):
                label.setCode(_getText(node))
            elif _matches(node, 'country'):
                # Country must be a two-letter upper-case code.
                country = _getText(node, '^[A-Z]{2}$')
                label.setCountry(country)
            elif _matches(node, 'life-span'):
                label.setBeginDate(_getDateAttr(node, 'begin'))
                label.setEndDate(_getDateAttr(node, 'end'))
            elif _matches(node, 'alias-list'):
                self._addLabelAliases(node, label)
            elif _matches(node, 'tag-list'):
                self._addTagsToEntity(node, label)
            elif _matches(node, 'rating'):
                self._addRatingToEntity(node, label)
        return label

    def _createRelease(self, releaseNode):
        # Build a Release from a <release> element.
        release = self._factory.newRelease()
        release.setId(_getIdAttr(releaseNode, 'id', 'release'))
        # A release may carry several space-separated type URIs.
        for t in _getUriListAttr(releaseNode, 'type'):
            release.addType(t)
        for node in _getChildElements(releaseNode):
            if _matches(node, 'title'):
                release.setTitle(_getText(node))
            elif _matches(node, 'text-representation'):
                # Language is ISO-639 (3 upper-case letters), script is
                # ISO-15924 (Titlecase, 4 letters).
                lang = _getAttr(node, 'language', '^[A-Z]{3}$')
                release.setTextLanguage(lang)
                script = _getAttr(node, 'script', '^[A-Z][a-z]{3}$')
                release.setTextScript(script)
            elif _matches(node, 'asin'):
                release.setAsin(_getText(node))
            elif _matches(node, 'artist'):
                release.setArtist(self._createArtist(node))
            elif _matches(node, 'release-event-list'):
                self._addReleaseEvents(node, release)
            elif _matches(node, 'release-group'):
                release.setReleaseGroup(self._createReleaseGroup(node))
            elif _matches(node, 'disc-list'):
                self._addDiscs(node, release)
            elif _matches(node, 'track-list'):
                (offset, count) = self._getListAttrs(node)
                release.setTracksOffset(offset)
                release.setTracksCount(count)
                self._addTracksToList(node, release.getTracks())
            elif _matches(node, 'relation-list'):
                self._addRelationsToEntity(node, release)
            elif _matches(node, 'tag-list'):
                self._addTagsToEntity(node, release)
            elif _matches(node, 'rating'):
                self._addRatingToEntity(node, release)
        return release

    def _createReleaseGroup(self, node):
        # Build a ReleaseGroup from a <release-group> element.
        rg = self._factory.newReleaseGroup()
        rg.setId(_getIdAttr(node, 'id', 'release-group'))
        rg.setType(_getUriAttr(node, 'type'))
        for child in _getChildElements(node):
            if _matches(child, 'title'):
                rg.setTitle(_getText(child))
            elif _matches(child, 'artist'):
                rg.setArtist(self._createArtist(child))
            elif _matches(child, 'release-list'):
                (offset, count) = self._getListAttrs(child)
                rg.setReleasesOffset(offset)
                rg.setReleasesCount(count)
                self._addReleasesToList(child, rg.getReleases())
        return rg

    def _addReleaseEvents(self, releaseListNode, release):
        for node in _getChildElements(releaseListNode):
            if _matches(node, 'event'):
                country = _getAttr(node, 'country', '^[A-Z]{2}$')
                date = _getDateAttr(node, 'date')
                catalogNumber = _getAttr(node, 'catalog-number')
                barcode = _getAttr(node, 'barcode')
                format = _getUriAttr(node, 'format')
                # The date attribute is mandatory. If it isn't present,
                # we don't add anything from this release event.
                if date is not None:
                    event = self._factory.newReleaseEvent()
                    event.setCountry(country)
                    event.setDate(date)
                    event.setCatalogNumber(catalogNumber)
                    event.setBarcode(barcode)
                    event.setFormat(format)
                    for subNode in _getChildElements(node):
                        if _matches(subNode, 'label'):
                            event.setLabel(self._createLabel(subNode))
                    release.addReleaseEvent(event)

    def _addDiscs(self, discIdListNode, release):
        # Discs without an 'id' attribute are skipped entirely.
        for node in _getChildElements(discIdListNode):
            if _matches(node, 'disc') and node.hasAttribute('id'):
                d = self._factory.newDisc()
                d.setId(node.getAttribute('id'))
                d.setSectors(_getIntAttr(node, 'sectors', 0))
                release.addDisc(d)

    def _addArtistAliases(self, aliasListNode, artist):
        for node in _getChildElements(aliasListNode):
            if _matches(node, 'alias'):
                alias = self._factory.newArtistAlias()
                self._initializeAlias(alias, node)
                artist.addAlias(alias)

    def _addLabelAliases(self, aliasListNode, label):
        for node in _getChildElements(aliasListNode):
            if _matches(node, 'alias'):
                alias = self._factory.newLabelAlias()
                self._initializeAlias(alias, node)
                label.addAlias(alias)

    def _initializeAlias(self, alias, node):
        # Shared between artist and label aliases.
        alias.setValue(_getText(node))
        alias.setType(_getUriAttr(node, 'type'))
        alias.setScript(_getAttr(node, 'script',
            '^[A-Z][a-z]{3}$'))

    def _createTrack(self, trackNode):
        # Build a Track from a <track> element.
        track = self._factory.newTrack()
        track.setId(_getIdAttr(trackNode, 'id', 'track'))
        for node in _getChildElements(trackNode):
            if _matches(node, 'title'):
                track.setTitle(_getText(node))
            elif _matches(node, 'artist'):
                track.setArtist(self._createArtist(node))
            elif _matches(node, 'duration'):
                track.setDuration(_getPositiveIntText(node))
            elif _matches(node, 'release-list'):
                self._addReleasesToList(node, track.getReleases())
            elif _matches(node, 'puid-list'):
                self._addPuids(node, track)
            elif _matches(node, 'isrc-list'):
                self._addISRCs(node, track)
            elif _matches(node, 'relation-list'):
                self._addRelationsToEntity(node, track)
            elif _matches(node, 'tag-list'):
                self._addTagsToEntity(node, track)
            elif _matches(node, 'rating'):
                self._addRatingToEntity(node, track)
        return track

    # MusicBrainz extension
    def _createUser(self, userNode):
        # <user> elements live in the MusicBrainz extension namespace.
        user = self._factory.newUser()
        for t in _getUriListAttr(userNode, 'type', NS_EXT_1):
            user.addType(t)
        for node in _getChildElements(userNode):
            if _matches(node, 'name'):
                user.setName(_getText(node))
            elif _matches(node, 'nag', NS_EXT_1):
                user.setShowNag(_getBooleanAttr(node, 'show'))
        return user

    def _createRating(self, ratingNode):
        rating = self._factory.newRating()
        rating.value = _getText(ratingNode)
        rating.count = _getIntAttr(ratingNode, 'votes-count')
        return rating

    def _createTag(self, tagNode):
        tag = self._factory.newTag()
        tag.value = _getText(tagNode)
        tag.count = _getIntAttr(tagNode, 'count')
        return tag

    def _addPuids(self, puidListNode, track):
        for node in _getChildElements(puidListNode):
            if _matches(node, 'puid') and node.hasAttribute('id'):
                track.addPuid(node.getAttribute('id'))

    def _addISRCs(self, isrcListNode, track):
        for node in _getChildElements(isrcListNode):
            if _matches(node, 'isrc') and node.hasAttribute('id'):
                track.addISRC(node.getAttribute('id'))

    def _addRelationsToEntity(self, relationListNode, entity):
        # The target type lives on the list element and applies to all
        # contained relations; without it the whole list is skipped.
        targetType = _getUriAttr(relationListNode, 'target-type', NS_REL_1)
        if targetType is None:
            return
        for node in _getChildElements(relationListNode):
            if _matches(node, 'relation'):
                rel = self._createRelation(node, targetType)
                if rel is not None:
                    entity.addRelation(rel)

    def _createRelation(self, relationNode, targetType):
        # Type, target type, and target id are all mandatory; return
        # None (relation dropped) if any is missing.
        relation = self._factory.newRelation()
        relation.setType(_getUriAttr(relationNode, 'type', NS_REL_1))
        relation.setTargetType(targetType)
        resType = _getResourceType(targetType)
        relation.setTargetId(_getIdAttr(relationNode, 'target', resType))
        if relation.getType() is None \
                or relation.getTargetType() is None \
                or relation.getTargetId() is None:
            return None
        relation.setDirection(_getDirectionAttr(relationNode, 'direction'))
        relation.setBeginDate(_getDateAttr(relationNode, 'begin'))
        relation.setEndDate(_getDateAttr(relationNode, 'end'))
        for a in _getUriListAttr(relationNode, 'attributes', NS_REL_1):
            relation.addAttribute(a)
        # An optional inline target entity may be embedded as the first
        # child element.
        target = None
        children = _getChildElements(relationNode)
        if len(children) > 0:
            node = children[0]
            if _matches(node, 'artist'):
                target = self._createArtist(node)
            elif _matches(node, 'release'):
                target = self._createRelease(node)
            elif _matches(node, 'track'):
                target = self._createTrack(node)
        relation.setTarget(target)
        return relation
#
# XML output
#
class _XmlWriter(object):
    """Minimal streaming XML serializer used by MbXmlWriter.

    Open elements are tracked on a stack so output is indented by
    nesting depth. Only the features needed for the MMD format are
    implemented.
    """

    def __init__(self, outStream, indentAmount=' ', newline="\n"):
        """Constructor.

        @param outStream: an open file-like object to write to
        @param indentAmount: whitespace emitted once per nesting level
        @param newline: the line terminator to use
        """
        self._out = outStream
        self._indentAmount = indentAmount
        self._stack = []
        self._newline = newline

    def prolog(self, encoding='UTF-8', version='1.0'):
        """Write the XML declaration."""
        pi = '<?xml version="%s" encoding="%s"?>' % (version, encoding)
        self._out.write(pi + self._newline)

    def start(self, name, attrs=None):
        """Open an element and push it on the stack."""
        # None default instead of a shared mutable {} default argument.
        indent = self._getIndention()
        self._stack.append(name)
        self._out.write(indent + self._makeTag(name, attrs or {})
            + self._newline)

    def end(self):
        """Close the most recently opened element."""
        name = self._stack.pop()
        indent = self._getIndention()
        # Use the configured newline rather than a hard-coded '\n' so
        # the constructor's newline argument is honoured everywhere.
        self._out.write('%s</%s>%s' % (indent, name, self._newline))

    def elem(self, name, value, attrs=None):
        """Write a complete element with text content and attributes.

        Attributes whose value is None or '' are dropped. If the text
        value is also empty, the element is written self-closed, or
        omitted entirely when no attributes remain.
        """
        # Build a filtered copy instead of deleting from the caller's
        # dict: the original mutated its argument (and the mutable
        # default), and deleting during iteration raises RuntimeError
        # on Python 3.
        attrs = dict((k, v) for (k, v) in (attrs or {}).items()
            if v is not None and v != '')
        if value is None or value == '':
            if len(attrs) == 0:
                return
            self._out.write(self._getIndention())
            self._out.write(self._makeTag(name, attrs, True) + self._newline)
        else:
            escValue = saxutils.escape(value or '')
            self._out.write(self._getIndention())
            self._out.write(self._makeTag(name, attrs))
            self._out.write(escValue)
            self._out.write('</%s>%s' % (name, self._newline))

    def _getIndention(self):
        # Indentation grows with the number of currently open elements.
        return self._indentAmount * len(self._stack)

    def _makeTag(self, name, attrs=None, close=False):
        """Serialize a start (or self-closing) tag with quoted attributes."""
        ret = '<' + name
        # items() works on both Python 2 and 3; iteritems() is 2-only.
        for (k, v) in (attrs or {}).items():
            if v is not None:
                v = saxutils.quoteattr(str(v))
                ret += ' %s=%s' % (k, v)
        if close:
            return ret + '/>'
        else:
            return ret + '>'
class MbXmlWriter(object):
    """Write XML in the Music Metadata XML format."""

    def __init__(self, indentAmount=' ', newline="\n"):
        """Constructor.

        @param indentAmount: the amount of whitespace to use per level
        @param newline: the line terminator to use
        """
        self._indentAmount = indentAmount
        self._newline = newline

    def write(self, outStream, metadata):
        """Writes the XML representation of a Metadata object to a file.

        @param outStream: an open file-like object
        @param metadata: a L{Metadata} object
        """
        xml = _XmlWriter(outStream, self._indentAmount, self._newline)
        xml.prolog()
        xml.start('metadata', {
            'xmlns': NS_MMD_1,
            'xmlns:ext': NS_EXT_1,
        })
        # Single top-level entities first, then the paged result lists.
        self._writeArtist(xml, metadata.getArtist())
        self._writeRelease(xml, metadata.getRelease())
        self._writeReleaseGroup(xml, metadata.getReleaseGroup())
        self._writeTrack(xml, metadata.getTrack())
        self._writeLabel(xml, metadata.getLabel())
        if len(metadata.getArtistResults()) > 0:
            xml.start('artist-list', {
                'offset': metadata.artistResultsOffset,
                'count': metadata.artistResultsCount,
            })
            for result in metadata.getArtistResults():
                self._writeArtist(xml, result.getArtist(),
                    result.getScore())
            xml.end()
        if len(metadata.getReleaseResults()) > 0:
            xml.start('release-list', {
                'offset': metadata.releaseResultsOffset,
                'count': metadata.releaseResultsCount,
            })
            for result in metadata.getReleaseResults():
                self._writeRelease(xml, result.getRelease(),
                    result.getScore())
            xml.end()
        if len(metadata.getReleaseGroupResults()) > 0:
            xml.start('release-group-list', {
                'offset': metadata.releaseGroupResultsOffset,
                'count': metadata.releaseGroupResultsCount
            })
            for result in metadata.getReleaseGroupResults():
                self._writeReleaseGroup(xml, result.getReleaseGroup(),
                    result.getScore())
            xml.end()
        if len(metadata.getTrackResults()) > 0:
            xml.start('track-list', {
                'offset': metadata.trackResultsOffset,
                'count': metadata.trackResultsCount,
            })
            for result in metadata.getTrackResults():
                self._writeTrack(xml, result.getTrack(),
                    result.getScore())
            xml.end()
        if len(metadata.getLabelResults()) > 0:
            xml.start('label-list', {
                'offset': metadata.labelResultsOffset,
                'count': metadata.labelResultsCount,
            })
            for result in metadata.getLabelResults():
                self._writeLabel(xml, result.getLabel(),
                    result.getScore())
            xml.end()
        xml.end()

    def _writeArtist(self, xml, artist, score=None):
        # Serialize one <artist>; a None artist writes nothing.
        if artist is None:
            return
        xml.start('artist', {
            'id': mbutils.extractUuid(artist.getId()),
            'type': mbutils.extractFragment(artist.getType()),
            'ext:score': score,
        })
        xml.elem('name', artist.getName())
        xml.elem('sort-name', artist.getSortName())
        xml.elem('disambiguation', artist.getDisambiguation())
        xml.elem('life-span', None, {
            'begin': artist.getBeginDate(),
            'end': artist.getEndDate(),
        })
        if len(artist.getAliases()) > 0:
            xml.start('alias-list')
            for alias in artist.getAliases():
                xml.elem('alias', alias.getValue(), {
                    'type': alias.getType(),
                    'script': alias.getScript(),
                })
            xml.end()
        if len(artist.getReleases()) > 0:
            xml.start('release-list')
            for release in artist.getReleases():
                self._writeRelease(xml, release)
            xml.end()
        if len(artist.getReleaseGroups()) > 0:
            xml.start('release-group-list')
            for releaseGroup in artist.getReleaseGroups():
                self._writeReleaseGroup(xml, releaseGroup)
            xml.end()
        self._writeRelationList(xml, artist)
        # TODO: extensions
        xml.end()

    def _writeRelease(self, xml, release, score=None):
        # Serialize one <release>; a None release writes nothing.
        if release is None:
            return
        # The type attribute is the space-separated list of type
        # fragments, or absent when the release has no types.
        types = [mbutils.extractFragment(t) for t in release.getTypes()]
        typesStr = None
        if len(types) > 0:
            typesStr = ' '.join(types)
        xml.start('release', {
            'id': mbutils.extractUuid(release.getId()),
            'type': typesStr,
            'ext:score': score,
        })
        xml.elem('title', release.getTitle())
        xml.elem('text-representation', None, {
            'language': release.getTextLanguage(),
            'script': release.getTextScript()
        })
        xml.elem('asin', release.getAsin())
        self._writeArtist(xml, release.getArtist())
        self._writeReleaseGroup(xml, release.getReleaseGroup())
        if len(release.getReleaseEvents()) > 0:
            xml.start('release-event-list')
            for event in release.getReleaseEvents():
                self._writeReleaseEvent(xml, event)
            xml.end()
        if len(release.getDiscs()) > 0:
            xml.start('disc-list')
            for disc in release.getDiscs():
                xml.elem('disc', None, { 'id': disc.getId() })
            xml.end()
        if len(release.getTracks()) > 0:
            # TODO: count attribute
            xml.start('track-list', {
                'offset': release.getTracksOffset()
            })
            for track in release.getTracks():
                self._writeTrack(xml, track)
            xml.end()
        self._writeRelationList(xml, release)
        # TODO: extensions
        xml.end()

    def _writeReleaseGroup(self, xml, rg, score=None):
        # Serialize one <release-group>; None writes nothing.
        if rg is None:
            return
        xml.start('release-group', {
            'id': mbutils.extractUuid(rg.getId()),
            'type': mbutils.extractFragment(rg.getType()),
            'ext:score': score,
        })
        xml.elem('title', rg.getTitle())
        self._writeArtist(xml, rg.getArtist())
        if len(rg.getReleases()) > 0:
            xml.start('release-list')
            for rel in rg.getReleases():
                self._writeRelease(xml, rel)
            xml.end()
        xml.end()

    def _writeReleaseEvent(self, xml, event):
        # Serialize one <event> inside a release-event-list.
        xml.start('event', {
            'country': event.getCountry(),
            'date': event.getDate(),
            'catalog-number': event.getCatalogNumber(),
            'barcode': event.getBarcode(),
            'format': event.getFormat()
        })
        self._writeLabel(xml, event.getLabel())
        xml.end()

    def _writeTrack(self, xml, track, score=None):
        # Serialize one <track>; a None track writes nothing.
        if track is None:
            return
        xml.start('track', {
            'id': mbutils.extractUuid(track.getId()),
            'ext:score': score,
        })
        xml.elem('title', track.getTitle())
        # BUG FIX: str(track.getDuration()) serialized the literal
        # string 'None' when no duration was set; passing None lets
        # elem() skip the element entirely.
        duration = track.getDuration()
        xml.elem('duration', None if duration is None else str(duration))
        self._writeArtist(xml, track.getArtist())
        if len(track.getReleases()) > 0:
            # TODO: offset + count
            xml.start('release-list')
            for release in track.getReleases():
                self._writeRelease(xml, release)
            xml.end()
        if len(track.getPuids()) > 0:
            xml.start('puid-list')
            for puid in track.getPuids():
                xml.elem('puid', None, { 'id': puid })
            xml.end()
        self._writeRelationList(xml, track)
        # TODO: extensions
        xml.end()

    def _writeLabel(self, xml, label, score=None):
        # Serialize one <label>; a None label writes nothing.
        if label is None:
            return
        xml.start('label', {
            'id': mbutils.extractUuid(label.getId()),
            'type': mbutils.extractFragment(label.getType()),
            'ext:score': score,
        })
        xml.elem('name', label.getName())
        xml.elem('sort-name', label.getSortName())
        xml.elem('disambiguation', label.getDisambiguation())
        xml.elem('life-span', None, {
            'begin': label.getBeginDate(),
            'end': label.getEndDate(),
        })
        if len(label.getAliases()) > 0:
            xml.start('alias-list')
            for alias in label.getAliases():
                xml.elem('alias', alias.getValue(), {
                    'type': alias.getType(),
                    'script': alias.getScript(),
                })
            xml.end()
        # TODO: releases, artists
        self._writeRelationList(xml, label)
        # TODO: extensions
        xml.end()

    def _writeRelationList(self, xml, entity):
        # One <relation-list> per target type the entity relates to.
        for tt in entity.getRelationTargetTypes():
            xml.start('relation-list', {
                'target-type': mbutils.extractFragment(tt),
            })
            for rel in entity.getRelations(targetType=tt):
                self._writeRelation(xml, rel, tt)
            xml.end()

    def _writeRelation(self, xml, rel, targetType):
        relAttrs = ' '.join([mbutils.extractFragment(a)
            for a in rel.getAttributes()])
        if relAttrs == '':
            relAttrs = None
        attrs = {
            'type': mbutils.extractFragment(rel.getType()),
            'target': rel.getTargetId(),
            'direction': rel.getDirection(),
            'begin': rel.getBeginDate(),
            # BUG FIX: the 'end' attribute previously repeated
            # rel.getBeginDate(); it must serialize the end date.
            'end': rel.getEndDate(),
            'attributes': relAttrs,
        }
        # An embedded target entity turns <relation/> into a container.
        if rel.getTarget() is None:
            xml.elem('relation', None, attrs)
        else:
            xml.start('relation', attrs)
            if targetType == NS_REL_1 + 'Artist':
                self._writeArtist(xml, rel.getTarget())
            elif targetType == NS_REL_1 + 'Release':
                self._writeRelease(xml, rel.getTarget())
            elif targetType == NS_REL_1 + 'Track':
                self._writeTrack(xml, rel.getTarget())
            xml.end()
#
# DOM Utilities
#
def _matches(node, name, namespace=NS_MMD_1):
    """Checks if an xml.dom.Node and a given name and namespace match."""
    return node.localName == name and node.namespaceURI == namespace
def _getChildElements(parentNode):
    """Returns all direct child elements of the given xml.dom.Node."""
    return [node for node in parentNode.childNodes
        if node.nodeType == node.ELEMENT_NODE]
def _getText(element, regex=None, default=None):
"""Returns the text content of the given xml.dom.Element.
This function simply fetches all contained text nodes, so the element
should not contain child elements.
"""
res = ''
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
res += node.data
if regex is None or re.match(regex, res):
return res
else:
return default
def _getPositiveIntText(element):
    """Returns the text content of the given xml.dom.Element as an int.

    Returns None if the element has no text or the text is not an integer.
    """
    text = _getText(element)
    if text is None:
        return None
    try:
        return int(text)
    except ValueError:
        return None
def _getAttr(element, attrName, regex=None, default=None, ns=None):
"""Returns an attribute of the given element.
If there is no attribute with that name or the attribute doesn't
match the regular expression, default is returned.
"""
if element.hasAttributeNS(ns, attrName):
content = element.getAttributeNS(ns, attrName)
if regex is None or re.match(regex, content):
return content
else:
return default
else:
return default
def _getDateAttr(element, attrName):
    """Gets an incomplete date (YYYY[-MM[-DD]]) from an attribute."""
    # Raw string: the non-raw '\d' escapes are invalid string escapes and
    # warn (eventually error) on modern Pythons.
    return _getAttr(element, attrName, r'^\d+(-\d\d)?(-\d\d)?$')
def _getIdAttr(element, attrName, typeName):
    """Gets an ID from an attribute and turns it into an absolute URI."""
    prefix = 'http://musicbrainz.org/' + typeName + '/'
    return _makeAbsoluteUri(prefix, _getAttr(element, attrName))
def _getIntAttr(element, attrName, min=0, max=None, ns=None):
    """Gets an int from an attribute, or None.

    Returns None when the attribute is missing, is not an integer, or
    lies outside the [min, max] range (max=None means no upper bound).
    Note: 'min'/'max' shadow builtins but are kept for caller compatibility.
    """
    try:
        val = int(_getAttr(element, attrName, ns=ns))
    except ValueError:
        return None  # attribute value cannot be converted to int
    except TypeError:
        return None  # attribute does not exist (int(None))
    upper = val if max is None else max
    return val if min <= val <= upper else None
def _getUriListAttr(element, attrName, prefix=NS_MMD_1):
    """Gets a list of URIs from a space-separated attribute."""
    if not element.hasAttribute(attrName):
        return []
    # List comprehension instead of filter()/map(): on Python 3 those
    # return lazy iterators, and callers (e.g. _getUriAttr) call len()
    # on the result.  On Python 2 the behaviour is unchanged.
    return [_makeAbsoluteUri(prefix, part)
            for part in re.split(r'\s+', element.getAttribute(attrName))
            if part != '']
def _getUriAttr(element, attrName, prefix=NS_MMD_1):
    """Gets a URI from an attribute.

    This also works for space-separated URI lists.  In this case, the
    first URI is returned.
    """
    uris = _getUriListAttr(element, attrName, prefix)
    return uris[0] if len(uris) > 0 else None
def _getBooleanAttr(element, attrName):
    """Gets a boolean value from an attribute ('true'/'false').

    Returns None for a missing attribute or any other value.
    """
    return {'true': True, 'false': False}.get(_getAttr(element, attrName))
def _getDirectionAttr(element, attrName):
    """Gets the Relation reading direction from an attribute."""
    regex = r'^\s*(' + '|'.join((
        model.Relation.DIR_FORWARD,
        model.Relation.DIR_BACKWARD)) + r')\s*$'
    # Bug fix: honour the attrName parameter instead of the hard-coded
    # 'direction' (known callers pass 'direction', so their behaviour is
    # unchanged).  Raw strings avoid invalid '\s' escapes.
    return _getAttr(element, attrName, regex, model.Relation.DIR_NONE)
def _makeAbsoluteUri(prefix, uriStr):
    """Creates an absolute URI adding prefix, if necessary."""
    if uriStr is None:
        return None
    parts = urlparse.urlparse(uriStr)
    # Relative URI: neither a scheme nor a network location present.
    if parts[0] == '' and parts[1] == '':
        return prefix + uriStr
    return uriStr
def _getResourceType(uri):
    """Gets the resource type from a URI.

    The resource type is the basename of the URI's path.
    """
    match = re.match('^' + NS_REL_1 + '(.*)$', uri)
    return match.group(1).lower() if match else None
# EOF
| {
"repo_name": "mineo/python-musicbrainz2",
"path": "src/musicbrainz2/wsxml.py",
"copies": "1",
"size": "47008",
"license": "bsd-3-clause",
"hash": -1789768180403304400,
"line_mean": 27.0644776119,
"line_max": 88,
"alpha_frac": 0.7034334581,
"autogenerated": false,
"ratio": 3.267167083680845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44706005417808453,
"avg_score": null,
"num_lines": null
} |
"""A parser for the pblite serialization format.
pblite (sometimes called "protojson") is a way of encoding Protocol Buffer
messages to arrays. Google uses this in Hangouts because JavaScript handles
arrays better than bytes.
This module allows parsing lists together with a schema to produce
programmer-friendly objects. The conversion from not-quite-json strings to
lists can be done using hangups.javascript.
See:
https://code.google.com/p/google-protorpc/source/browse/python/protorpc/
protojson.py
TODO: Serialization code is currently unused and doesn't have any tests.
"""
import itertools
import types
class Field(object):
    """An untyped field, corresponding to a primitive type."""

    def __init__(self, is_optional=False):
        self._is_optional = is_optional

    def parse(self, input_):
        """Parse the field.

        Raises ValueError if the input is None and the Field is not optional.
        """
        if input_ is None and not self._is_optional:
            raise ValueError('Field is not optional')
        return input_

    def serialize(self, input_):
        """Serialize the field.

        Raises ValueError if the input is None and the Field is not optional.
        """
        return self.parse(input_)
class EnumField(object):
    """An enumeration field.

    Corresponds to a specified set of constants defined by the given Enum.
    EnumFields are always required, but an enum may contain None as a value.
    """

    def __init__(self, enum):
        self._enum = enum

    def parse(self, input_):
        """Parse the field.

        Raises ValueError if the input is not an option in the enum.
        """
        return self._enum(input_)

    def serialize(self, input_):
        """Serialize the field.

        Raises ValueError if the input is not an option in the enum.
        """
        return self.parse(input_).value
class RepeatedField(object):
    """A field which may be repeated any number of times.

    Corresponds to a list.
    """

    def __init__(self, field, is_optional=False):
        self._field = field
        self._is_optional = is_optional

    def parse(self, input_, serialize=False):
        """Parse the message.

        Raises ValueError if the input is None and the RepeatedField is not
        optional, or if the input is not a list.
        """
        if input_ is None:
            if self._is_optional:
                return None
            raise ValueError('RepeatedField is not optional')
        if not isinstance(input_, list):
            raise ValueError('RepeatedField expected list but got {}'
                             .format(type(input_)))
        convert = self._field.serialize if serialize else self._field.parse
        items = []
        for item in input_:
            try:
                items.append(convert(item))
            except ValueError as e:
                raise ValueError('RepeatedField item: {}'.format(e))
        return items

    def serialize(self, input_):
        """Serialize the message.

        Raises ValueError if the input is None and the RepeatedField is not
        optional, or if the input is not a list.
        """
        return self.parse(input_, serialize=True)
class Message(object):
    """A field consisting of a collection of fields paired with a name.

    Corresponds to an object (SimpleNamespace).

    The input may be shorter than the number of fields (trailing fields get
    None) or longer (trailing input items are ignored).  Fields with name
    None make the corresponding input item optional and ignored.
    """

    def __init__(self, *args, is_optional=False):
        self._name_field_pairs = args
        self._is_optional = is_optional

    def parse(self, input_):
        """Parse the message.

        Raises ValueError if the input is None and the Message is not
        optional, or if any of the contained Fields fail to parse.
        """
        if input_ is None:
            if self._is_optional:
                return None
            raise ValueError('Message is not optional')
        if not isinstance(input_, list):
            raise ValueError('Message expected list but got {}'
                             .format(type(input_)))
        # Pad input with Nones so short inputs still fill every field.
        padded = itertools.chain(input_, itertools.repeat(None))
        result = types.SimpleNamespace()
        for (name, field), item in zip(self._name_field_pairs, padded):
            if name is None:
                continue
            try:
                value = field.parse(item)
            except ValueError as e:
                raise ValueError('Message field \'{}\': {}'.
                                 format(name, e))
            setattr(result, name, value)
        return result

    def serialize(self, input_):
        """Serialize the message.

        Raises ValueError if the input is None and the Message is not
        optional, or if any of the contained Fields fail to parse.
        """
        if input_ is None:
            if self._is_optional:
                return None
            raise ValueError('Message is not optional')
        if not isinstance(input_, types.SimpleNamespace):
            raise ValueError(
                'Message expected types.SimpleNamespace but got {}'
                .format(type(input_))
            )
        return [None if name is None
                else field.serialize(getattr(input_, name))
                for name, field in self._name_field_pairs]
| {
"repo_name": "taigrr/hangups",
"path": "hangups/pblite.py",
"copies": "5",
"size": "6015",
"license": "mit",
"hash": 9081935912682762000,
"line_mean": 30.9946808511,
"line_max": 79,
"alpha_frac": 0.6031587697,
"autogenerated": false,
"ratio": 4.6340523882896765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7737211157989676,
"avg_score": null,
"num_lines": null
} |
"""A parser for Thorsten Koch's final netlib results.model.lp
http://www.zib.de/koch/perplex/data/netlib/txt/
@article{Koch:2004:FNR:2308906.2309292,
author = {Koch, Thorsten},
title = {The Final NETLIB-LP Results},
journal = {Oper. Res. Lett.},
issue_date = {March, 2004},
volume = {32},
number = {2},
month = mar,
year = {2004},
issn = {0167-6377},
pages = {138--142},
numpages = {5},
url = {http://dx.doi.org/10.1016/S0167-6377(03)00094-4},
doi = {10.1016/S0167-6377(03)00094-4},
acmid = {2309292},
publisher = {Elsevier Science Publishers B. V.},
address = {Amsterdam, The Netherlands, The Netherlands},
keywords = {Linear-programming, NETLIB, Rational-arithmetic},
}
"""
import glob
import gzip
import os
import pickle
import re
from fractions import Fraction
import six
# Matches lines such as "* Objvalue : -123/456".  Raw string so the \d
# escapes reach the re module without invalid-escape warnings.
OBJ_REGEX = re.compile(r'\* Objvalue : -?\d+/\d+')

the_final_netlib_results = dict()

# Scan every gzipped result file and record the exact rational objective
# value, keyed by the upper-cased problem name.
for path in glob.glob("netlib_reference_results/*.txt.gz"):
    print("Parsing", path)
    with gzip.open(path) as fhandle:
        for line in fhandle.readlines():
            # NOTE(review): on Python 3 gzip.open() yields bytes while
            # OBJ_REGEX is a str pattern; a decode may be needed there —
            # confirm the intended interpreter version.
            if OBJ_REGEX.match(line):
                obj_value = Fraction(line.split(' : ')[1])
                the_final_netlib_results[os.path.basename(path).replace('.txt.gz', '').upper()] = {
                    "Objvalue": obj_value}
                break

# Sanity-check the collected data before persisting it.
for key, value in six.iteritems(the_final_netlib_results):
    assert "Objvalue" in value
    assert isinstance(value['Objvalue'], Fraction)

# Bug fix: pickle protocol 2 is a binary format, so the file must be
# opened in binary mode.  Text mode ('w') raises TypeError on Python 3
# and corrupts the stream on Windows under Python 2.
with open('the_final_netlib_results.pcl', 'wb') as fhandle:
    pickle.dump(the_final_netlib_results, fhandle, protocol=2)
| {
"repo_name": "biosustain/optlang",
"path": "src/optlang/tests/data/parse_the_final_netlib_results.py",
"copies": "1",
"size": "1576",
"license": "apache-2.0",
"hash": -6105886372511711000,
"line_mean": 28.1851851852,
"line_max": 99,
"alpha_frac": 0.6516497462,
"autogenerated": false,
"ratio": 2.929368029739777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9074536294458295,
"avg_score": 0.0012962962962962963,
"num_lines": 54
} |
# sgmlop support added by fredrik@pythonware.com (May 19, 1998)
__version__=''' $Id: xmllib.py 3660 2010-02-08 18:17:33Z damian $ '''
__doc__='''From before xmllib was in the Python standard library.
Probably ought to be removed'''
import re
import string
try:
import sgmlop # this works for both builtin on the path or relative
except ImportError:
sgmlop = None
# standard entity defs
ENTITYDEFS = {
'lt': '<',
'gt': '>',
'amp': '&',
'quot': '"',
'apos': '\''
}
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks). Entity references are
# passed by calling self.handle_entityref() with the entity reference
# as argument.
# --------------------------------------------------------------------
# original re-based XML parser
# Regular expressions used by the original re-based parser.  Patterns
# containing regex metacharacter escapes (\[, \], \?) are raw strings so
# they reach the re module unmangled; non-raw forms are invalid string
# escapes and warn on modern Pythons.  \t/\r/\n below are intentional
# real character escapes and stay non-raw.
_S = '[ \t\r\n]+'
_opS = '[ \t\r\n]*'
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*'
interesting = re.compile('[&<]')
incomplete = re.compile('&(' + _Name + '|#[0-9]*|#x[0-9a-fA-F]*)?|'
                        '<([a-zA-Z_:][^<>]*|'
                        '/([a-zA-Z_:][^<>]*)?|'
                        '![^<>]*|'
                        r'\?[^<>]*)?')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+);?')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S)
newline = re.compile('\n')
starttagopen = re.compile('<' + _Name)
endtagopen = re.compile('</')
starttagend = re.compile(_opS + '(?P<slash>/?)>')
endbracket = re.compile('>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
special = re.compile('<!(?P<special>[^<>]*)>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _S)
procclose = re.compile(r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrfind = re.compile(
    _opS + '(?P<name>' + _Name + ')'
    '(' + _opS + '=' + _opS +
    '(?P<value>\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9.:+*%?!()_#=~]+))')
class SlowXMLParser:
    """Pure-Python re-based XML parser; fallback when sgmlop is missing.

    Derive from it and define start_foo/end_foo methods to handle <foo>
    and </foo>; character data is delivered via handle_data().  Fixes
    applied relative to the original: string.atoi/string.atoi_error
    (removed in Python 3) replaced by int()/ValueError, and the CDATA
    line counter now counts newlines over the CDATA span instead of an
    empty slice.
    """

    # Interface -- initialize and reset this instance
    def __init__(self, verbose=0):
        self.verbose = verbose
        self.reset()

    # Interface -- reset this instance. Loses all unprocessed data
    def reset(self):
        self.rawdata = ''
        self.stack = []
        self.lasttag = '???'
        self.nomoretags = 0
        self.literal = 0
        self.lineno = 1

    # For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        self.nomoretags = self.literal = 1

    # For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        self.literal = 1

    # Interface -- feed some data to the parser. Call this as
    # often as you want, with as little or as much text as you
    # want (may include '\n'). (This just saves the text, all the
    # processing is done by goahead().)
    def feed(self, data):
        self.rawdata = self.rawdata + data
        self.goahead(0)

    # Interface -- handle the remaining data
    def close(self):
        self.goahead(1)

    # Interface -- translate references
    def translate_references(self, data):
        newdata = []
        i = 0
        while 1:
            res = ref.search(data, i)
            if res is None:
                newdata.append(data[i:])
                return ''.join(newdata)
            if data[res.end(0) - 1] != ';':
                self.syntax_error(self.lineno,
                                  '; missing after entity/char reference')
            newdata.append(data[i:res.start(0)])
            # Renamed from 'str' to avoid shadowing the builtin.
            ref_text = res.group(1)
            if ref_text[0] == '#':
                # Numeric character reference; int() replaces the
                # Python-2-only string.atoi().
                if ref_text[1] == 'x':
                    newdata.append(chr(int(ref_text[2:], 16)))
                else:
                    newdata.append(chr(int(ref_text[1:])))
            else:
                try:
                    newdata.append(self.entitydefs[ref_text])
                except KeyError:
                    # can't do it, so keep the entity ref in
                    newdata.append('&' + ref_text + ';')
            i = res.end(0)

    # Internal -- handle data as far as reasonable. May leave state
    # and data to be processed by a subsequent call. If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if self.nomoretags:
                data = rawdata[i:n]
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
                i = n
                break
            res = interesting.search(rawdata, i)
            if res:
                j = res.start(0)
            else:
                j = n
            if i < j:
                data = rawdata[i:j]
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if endtagopen.match(rawdata, i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    self.literal = 0
                    continue
                if commentopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_comment(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if cdataopen.match(rawdata, i):
                    k = self.parse_cdata(i)
                    if k < 0: break
                    # Bug fix: count newlines over the whole CDATA span
                    # (i:k); the original counted over the empty slice i:i,
                    # so line numbers drifted after multi-line CDATA.
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = procopen.match(rawdata, i)
                if res:
                    k = self.parse_proc(i, res)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = special.match(rawdata, i)
                if res:
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    self.handle_special(res.group('special'))
                    self.lineno = self.lineno + res.group(0).count('\n')
                    i = res.end(0)
                    continue
            elif rawdata[i] == '&':
                res = charref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error(self.lineno, '; missing in charref')
                        i = i-1
                    self.handle_charref(res.group('char')[:-1])
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
                res = entityref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error(self.lineno, '; missing in entityref')
                        i = i-1
                    self.handle_entityref(res.group('name'))
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
            else:
                raise RuntimeError('neither < nor & ??')
            # We get here only if incomplete matches but
            # nothing else
            res = incomplete.match(rawdata, i)
            if not res:
                data = rawdata[i]
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
                i = i+1
                continue
            j = res.end(0)
            if j == n:
                break # Really incomplete
            # allow naked < or &
            # self.syntax_error(self.lineno, 'bogus < or &')
            data = res.group(0)
            self.handle_data(data)
            self.lineno = self.lineno + data.count('\n')
            i = j
        # end while
        if end and i < n:
            data = rawdata[i:n]
            self.handle_data(data)
            self.lineno = self.lineno + data.count('\n')
            i = n
        self.rawdata = rawdata[i:]
        # XXX if end: check for empty stack

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            raise RuntimeError('unexpected call to handle_comment')
        res = commentclose.search(rawdata, i+4)
        if not res:
            return -1
        # doubledash search will succeed because it's a subset of commentclose
        if doubledash.search(rawdata, i+4).start(0) < res.start(0):
            self.syntax_error(self.lineno, "'--' inside comment")
        self.handle_comment(rawdata[i+4: res.start(0)])
        return res.end(0)

    # Internal -- handle CDATA tag, return length or -1 if not terminated
    def parse_cdata(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+9] != '<![CDATA[':
            raise RuntimeError('unexpected call to handle_cdata')
        res = cdataclose.search(rawdata, i+9)
        if not res:
            return -1
        self.handle_cdata(rawdata[i+9:res.start(0)])
        return res.end(0)

    # Internal -- parse processing instruction, return length or -1
    def parse_proc(self, i, res):
        rawdata = self.rawdata
        if not res:
            raise RuntimeError('unexpected call to parse_proc')
        name = res.group('proc')
        res = procclose.search(rawdata, res.end(0))
        if not res:
            return -1
        self.handle_proc(name, rawdata[res.pos:res.start(0)])
        return res.end(0)

    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        rawdata = self.rawdata
        # i points to start of tag
        end = endbracket.search(rawdata, i+1)
        if not end:
            return -1
        j = end.start(0)
        # Now parse the data between i+1 and j into a tag and attrs
        attrdict = {}
        res = tagfind.match(rawdata, i+1)
        if not res:
            raise RuntimeError('unexpected call to parse_starttag')
        k = res.end(0)
        tag = res.group(0)
        if hasattr(self, tag + '_attributes'):
            attrlist = getattr(self, tag + '_attributes')
        else:
            attrlist = None
        self.lasttag = tag
        while k < j:
            res = attrfind.match(rawdata, k)
            if not res: break
            attrname, attrvalue = res.group('name', 'value')
            if attrvalue is None:
                self.syntax_error(self.lineno, 'no attribute value specified')
                attrvalue = attrname
            elif attrvalue[:1] == "'" == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            else:
                self.syntax_error(self.lineno, 'attribute value not quoted')
            if attrlist is not None and attrname not in attrlist:
                self.syntax_error(self.lineno,
                                  'unknown attribute %s of element %s' %
                                  (attrname, tag))
            if attrname in attrdict:
                self.syntax_error(self.lineno, 'attribute specified twice')
            attrdict[attrname] = self.translate_references(attrvalue)
            k = res.end(0)
        res = starttagend.match(rawdata, k)
        if not res:
            self.syntax_error(self.lineno, 'garbage in start tag')
        self.finish_starttag(tag, attrdict)
        if res and res.group('slash') == '/':
            self.finish_endtag(tag)
        return end.end(0)

    # Internal -- parse endtag
    def parse_endtag(self, i):
        rawdata = self.rawdata
        end = endbracket.search(rawdata, i+1)
        if not end:
            return -1
        res = tagfind.match(rawdata, i+2)
        if not res:
            self.syntax_error(self.lineno, 'no name specified in end tag')
            tag = ''
            k = i+2
        else:
            tag = res.group(0)
            k = res.end(0)
        if k != end.start(0):
            # check that there is only white space at end of tag
            res = space.match(rawdata, k)
            if res is None or res.end(0) != end.start(0):
                self.syntax_error(self.lineno, 'garbage in end tag')
        self.finish_endtag(tag)
        return end.end(0)

    # Internal -- finish processing of start tag
    # Return -1 for unknown tag, 1 for balanced tag
    def finish_starttag(self, tag, attrs):
        self.stack.append(tag)
        try:
            method = getattr(self, 'start_' + tag)
        except AttributeError:
            self.unknown_starttag(tag, attrs)
            return -1
        else:
            self.handle_starttag(tag, method, attrs)
            return 1

    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        if not tag:
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            if tag not in self.stack:
                try:
                    method = getattr(self, 'end_' + tag)
                except AttributeError:
                    self.unknown_endtag(tag)
                return
            found = len(self.stack)
            for i in range(found):
                if self.stack[i] == tag: found = i
        while len(self.stack) > found:
            tag = self.stack[-1]
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                method = None
            if method:
                self.handle_endtag(tag, method)
            else:
                self.unknown_endtag(tag)
            del self.stack[-1]

    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        method(attrs)

    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        method()

    # Example -- handle character reference, no need to override
    def handle_charref(self, name):
        try:
            # int() replaces the Python-2-only string.atoi, which raised
            # string.atoi_error; int() raises ValueError instead.
            if name[0] == 'x':
                n = int(name[1:], 16)
            else:
                n = int(name)
        except ValueError:
            self.unknown_charref(name)
            return
        if not 0 <= n <= 255:
            self.unknown_charref(name)
            return
        self.handle_data(chr(n))

    # Definition of entities -- derived classes may override
    entitydefs = ENTITYDEFS

    # Example -- handle entity reference, no need to override
    def handle_entityref(self, name):
        table = self.entitydefs
        if name in table:
            self.handle_data(table[name])
        else:
            self.unknown_entityref(name)
            return

    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass

    # Example -- handle cdata, could be overridden
    def handle_cdata(self, data):
        pass

    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass

    # Example -- handle processing instructions, could be overridden
    def handle_proc(self, name, data):
        pass

    # Example -- handle special instructions, could be overridden
    def handle_special(self, data):
        pass

    # Example -- handle relatively harmless syntax errors, could be overridden
    def syntax_error(self, lineno, message):
        raise RuntimeError('Syntax error at line %d: %s' % (lineno, message))

    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, ref): pass
# --------------------------------------------------------------------
# accelerated XML parser
class FastXMLParser:
    """sgmlop-accelerated parser exposing the SlowXMLParser interface.

    Tokenizing is delegated to the sgmlop C extension; only the callback
    plumbing lives here.  Fixes applied relative to the original:
    string.atoi/string.atoi_error (removed in Python 3) replaced by
    int()/ValueError.
    """

    # Interface -- initialize and reset this instance
    def __init__(self, verbose=0):
        self.verbose = verbose
        self.reset()

    # Interface -- reset this instance. Loses all unprocessed data
    def reset(self):
        self.rawdata = ''
        self.stack = []
        self.lasttag = '???'
        self.nomoretags = 0
        self.literal = 0
        self.lineno = 1
        self.parser = sgmlop.XMLParser()
        # Bind feed directly to the C parser, shadowing the method below.
        self.feed = self.parser.feed
        self.parser.register(self)

    # For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        self.nomoretags = self.literal = 1

    # For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        self.literal = 1

    # Interface -- feed some data to the parser. Call this as
    # often as you want, with as little or as much text as you
    # want (may include '\n'). (This just saves the text, all the
    # processing is done by goahead().)
    def feed(self, data): # overridden by reset
        self.parser.feed(data)

    # Interface -- handle the remaining data
    def close(self):
        try:
            self.parser.close()
        finally:
            self.parser = None

    # Interface -- translate references
    def translate_references(self, data):
        newdata = []
        i = 0
        while 1:
            res = ref.search(data, i)
            if res is None:
                newdata.append(data[i:])
                return ''.join(newdata)
            if data[res.end(0) - 1] != ';':
                self.syntax_error(self.lineno,
                                  '; missing after entity/char reference')
            newdata.append(data[i:res.start(0)])
            # Renamed from 'str' to avoid shadowing the builtin.
            ref_text = res.group(1)
            if ref_text[0] == '#':
                # Numeric character reference; int() replaces the
                # Python-2-only string.atoi().
                if ref_text[1] == 'x':
                    newdata.append(chr(int(ref_text[2:], 16)))
                else:
                    newdata.append(chr(int(ref_text[1:])))
            else:
                try:
                    newdata.append(self.entitydefs[ref_text])
                except KeyError:
                    # can't do it, so keep the entity ref in
                    newdata.append('&' + ref_text + ';')
            i = res.end(0)

    # Internal -- finish processing of start tag
    # Return -1 for unknown tag, 1 for balanced tag
    def finish_starttag(self, tag, attrs):
        self.stack.append(tag)
        try:
            method = getattr(self, 'start_' + tag)
        except AttributeError:
            self.unknown_starttag(tag, attrs)
            return -1
        else:
            self.handle_starttag(tag, method, attrs)
            return 1

    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        if not tag:
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            if tag not in self.stack:
                try:
                    method = getattr(self, 'end_' + tag)
                except AttributeError:
                    self.unknown_endtag(tag)
                return
            found = len(self.stack)
            for i in range(found):
                if self.stack[i] == tag: found = i
        while len(self.stack) > found:
            tag = self.stack[-1]
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                method = None
            if method:
                self.handle_endtag(tag, method)
            else:
                self.unknown_endtag(tag)
            del self.stack[-1]

    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        method(attrs)

    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        method()

    # Example -- handle character reference, no need to override
    def handle_charref(self, name):
        try:
            # int() replaces the Python-2-only string.atoi, which raised
            # string.atoi_error; int() raises ValueError instead.
            if name[0] == 'x':
                n = int(name[1:], 16)
            else:
                n = int(name)
        except ValueError:
            self.unknown_charref(name)
            return
        if not 0 <= n <= 255:
            self.unknown_charref(name)
            return
        self.handle_data(chr(n))

    # Definition of entities -- derived classes may override
    entitydefs = ENTITYDEFS

    # Example -- handle entity reference, no need to override
    def handle_entityref(self, name):
        table = self.entitydefs
        if name in table:
            self.handle_data(table[name])
        else:
            self.unknown_entityref(name)
            return

    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass

    # Example -- handle cdata, could be overridden
    def handle_cdata(self, data):
        pass

    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass

    # Example -- handle processing instructions, could be overridden
    def handle_proc(self, name, data):
        pass

    # Example -- handle special instructions, could be overridden
    def handle_special(self, data):
        pass

    # Example -- handle relatively harmless syntax errors, could be overridden
    def syntax_error(self, lineno, message):
        raise RuntimeError('Syntax error at line %d: %s' % (lineno, message))

    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, ref): pass
#sgmlop = None    # uncomment to force the pure-Python parser for debugging

# pick a suitable parser
if sgmlop:
    XMLParser = FastXMLParser    # C accelerator is importable
else:
    XMLParser = SlowXMLParser    # pure-Python fallback
# --------------------------------------------------------------------
# test stuff
class TestXMLParser(XMLParser):
    """Demo subclass that prints every parser event it receives."""

    def __init__(self, verbose=0):
        self.testdata = ""
        XMLParser.__init__(self, verbose)

    def handle_data(self, data):
        self.testdata = self.testdata + data
        # Flush in chunks so very long runs of text stay readable.
        if len(repr(self.testdata)) >= 70:
            self.flush()

    def flush(self):
        data = self.testdata
        if data:
            self.testdata = ""
            print('data: %s' % (repr(data),))

    def handle_cdata(self, data):
        self.flush()
        print('cdata:%s ' % (repr(data),))

    def handle_proc(self, name, data):
        self.flush()
        print('processing: %s %s ' % (name, repr(data)))

    def handle_special(self, data):
        self.flush()
        print('special: %s' % (repr(data),))

    def handle_comment(self, data):
        self.flush()
        r = repr(data)
        if len(r) > 68:
            r = r[:32] + '...' + r[-32:]
        # Bug fix: the format string was 'comment: %' (no conversion
        # character), which raised ValueError on the first comment.
        print('comment: %s' % (r,))

    def syntax_error(self, lineno, message):
        # Bug fix: the old format string had one %d placeholder but two
        # arguments, raising TypeError instead of reporting the error.
        print('error at line %d: %s' % (lineno, message))

    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print('start tag: <' + tag + '>')
        else:
            print('start tag: <' + tag,)
            for name, value in attrs.items():
                print(name + '=' + '"' + value + '"',)
            print('>')

    def unknown_endtag(self, tag):
        self.flush()
        print('end tag: </' + tag + '>')

    def unknown_entityref(self, ref):
        self.flush()
        print('*** unknown entity ref: &' + ref + ';')

    def unknown_charref(self, ref):
        self.flush()
        print('*** unknown char ref: &#' + ref + ';')

    def close(self):
        XMLParser.close(self)
        self.flush()
def test(args=None):
    """Command-line driver: parse a file (default test.xml) and dump events.

    With '-s' as the first argument the silent base XMLParser is used
    instead of the printing TestXMLParser.
    """
    import sys
    if not args:
        args = sys.argv[1:]
    if args and args[0] == '-s':
        args = args[1:]
        klass = XMLParser
    else:
        klass = TestXMLParser
    if args:
        file = args[0]
    else:
        file = 'test.xml'
    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError as msg:
            # Bug fix: on open() failure `f` was never bound, so the old
            # `f.write(":" + msg)` raised NameError (and concatenated an
            # exception object to a str).  Report on stderr instead.
            sys.stderr.write('%s: %s\n' % (file, msg))
            sys.exit(1)
    data = f.read()
    if f is not sys.stdin:
        f.close()
    x = klass()
    # Feed one character at a time to exercise incremental parsing.
    for c in data:
        x.feed(c)
    x.close()
if __name__ == '__main__': #NO_REPORTLAB_TEST
test()
| {
"repo_name": "nakagami/reportlab",
"path": "src/reportlab/lib/xmllib.py",
"copies": "1",
"size": "25789",
"license": "bsd-3-clause",
"hash": 6492973456177945000,
"line_mean": 32.319121447,
"line_max": 80,
"alpha_frac": 0.5040521152,
"autogenerated": false,
"ratio": 4.062539382482671,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015334872790455443,
"num_lines": 774
} |
"""A parser for XML, using the derived class as static DTD."""
# Author: Sjoerd Mullender.
import re
import string
import warnings
warnings.warn("The xmllib module is obsolete. Use xml.sax instead.", DeprecationWarning)
del warnings
version = '0.3'
class Error(RuntimeError):
    """Base exception class for errors raised by this xmllib module."""
    pass
# Regular expressions used for parsing
_S = '[ \t\r\n]+' # white space
_opS = '[ \t\r\n]*' # optional white space
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*' # valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")" # quoted XML string
illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
interesting = re.compile('[]&<]')
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')
attrfind = re.compile(
_S + '(?P<name>' + _Name + ')'
'(' + _opS + '=' + _opS +
'(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
'(?P<attrs>(?:'+attrfind.pattern+')*)'+
starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# this matches one of the following:
# SYSTEM SystemLiteral
# PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>'+_QStr+')'
_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
"'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
_ExternalId = '(?:SYSTEM|' \
'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
')'+_S+_SystemLiteral%'syslit'
doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
'(?:'+_S+_ExternalId+')?'+_opS)
xmldecl = re.compile('<\?xml'+_S+
'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
'(?:'+_S+'encoding'+_opS+'='+_opS+
"(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
'"[A-Za-z][-A-Za-z0-9._]*"))?'
'(?:'+_S+'standalone'+_opS+'='+_opS+
'(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
_opS+'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrtrans = string.maketrans(' \r\n\t', ' ')
# definitions for XML namespaces
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*' # XML Name, minus the ":"
ncname = re.compile(_NCName + '$')
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
'(?P<local>' + _NCName + ')$')
xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks).
class XMLParser:
attributes = {} # default, to be overridden
elements = {} # default, to be overridden
# parsing options, settable using keyword args in __init__
__accept_unquoted_attributes = 0
__accept_missing_endtag_name = 0
__map_case = 0
__accept_utf8 = 0
__translate_attribute_references = 1
# Interface -- initialize and reset this instance
def __init__(self, **kw):
self.__fixed = 0
if 'accept_unquoted_attributes' in kw:
self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
if 'accept_missing_endtag_name' in kw:
self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
if 'map_case' in kw:
self.__map_case = kw['map_case']
if 'accept_utf8' in kw:
self.__accept_utf8 = kw['accept_utf8']
if 'translate_attribute_references' in kw:
self.__translate_attribute_references = kw['translate_attribute_references']
self.reset()
def __fixelements(self):
self.__fixed = 1
self.elements = {}
self.__fixdict(self.__dict__)
self.__fixclass(self.__class__)
def __fixclass(self, kl):
self.__fixdict(kl.__dict__)
for k in kl.__bases__:
self.__fixclass(k)
def __fixdict(self, dict):
for key in dict.keys():
if key[:6] == 'start_':
tag = key[6:]
start, end = self.elements.get(tag, (None, None))
if start is None:
self.elements[tag] = getattr(self, key), end
elif key[:4] == 'end_':
tag = key[4:]
start, end = self.elements.get(tag, (None, None))
if end is None:
self.elements[tag] = start, getattr(self, key)
# Interface -- reset this instance. Loses all unprocessed data
def reset(self):
self.rawdata = ''
self.stack = []
self.nomoretags = 0
self.literal = 0
self.lineno = 1
self.__at_start = 1
self.__seen_doctype = None
self.__seen_starttag = 0
self.__use_namespaces = 0
self.__namespaces = {'xml':None} # xml is implicitly declared
# backward compatibility hack: if elements not overridden,
# fill it in ourselves
if self.elements is XMLParser.elements:
self.__fixelements()
# For derived classes only -- enter literal mode (CDATA) till EOF
def setnomoretags(self):
self.nomoretags = self.literal = 1
# For derived classes only -- enter literal mode (CDATA)
def setliteral(self, *args):
self.literal = 1
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
def feed(self, data):
self.rawdata = self.rawdata + data
self.goahead(0)
# Interface -- handle the remaining data
def close(self):
self.goahead(1)
if self.__fixed:
self.__fixed = 0
# remove self.elements so that we don't leak
del self.elements
# Interface -- translate references
def translate_references(self, data, all = 1):
if not self.__translate_attribute_references:
return data
i = 0
while 1:
res = amp.search(data, i)
if res is None:
return data
s = res.start(0)
res = ref.match(data, s)
if res is None:
self.syntax_error("bogus `&'")
i = s+1
continue
i = res.end(0)
str = res.group(1)
rescan = 0
if str[0] == '#':
if str[1] == 'x':
str = chr(int(str[2:], 16))
else:
str = chr(int(str[1:]))
if data[i - 1] != ';':
self.syntax_error("`;' missing after char reference")
i = i-1
elif all:
if str in self.entitydefs:
str = self.entitydefs[str]
rescan = 1
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
else:
self.syntax_error("reference to unknown entity `&%s;'" % str)
str = '&' + str + ';'
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
# when we get here, str contains the translated text and i points
# to the end of the string that is to be replaced
data = data[:s] + str + data[i:]
if rescan:
i = s
else:
i = s + len(str)
# Interface - return a dictionary of all namespaces currently valid
def getnamespace(self):
nsdict = {}
for t, d, nst in self.stack:
nsdict.update(d)
return nsdict
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if i > 0:
self.__at_start = 0
if self.nomoretags:
data = rawdata[i:n]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = n
break
res = interesting.search(rawdata, i)
if res:
j = res.start(0)
else:
j = n
if i < j:
data = rawdata[i:j]
if self.__at_start and space.match(data) is None:
self.syntax_error('illegal data at start of file')
self.__at_start = 0
if not self.stack and space.match(data) is None:
self.syntax_error('data not in content')
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
self.__seen_starttag = 1
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if commentopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_comment(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if cdataopen.match(rawdata, i):
k = self.parse_cdata(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = xmldecl.match(rawdata, i)
if res:
if not self.__at_start:
self.syntax_error("<?xml?> declaration not at start of document")
version, encoding, standalone = res.group('version',
'encoding',
'standalone')
if version[1:-1] != '1.0':
raise Error('only XML version 1.0 supported')
if encoding: encoding = encoding[1:-1]
if standalone: standalone = standalone[1:-1]
self.handle_xml(encoding, standalone)
i = res.end(0)
continue
res = procopen.match(rawdata, i)
if res:
k = self.parse_proc(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = doctype.match(rawdata, i)
if res:
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
if self.__seen_doctype:
self.syntax_error('multiple DOCTYPE elements')
if self.__seen_starttag:
self.syntax_error('DOCTYPE not at beginning of document')
k = self.parse_doctype(res)
if k < 0: break
self.__seen_doctype = res.group('name')
if self.__map_case:
self.__seen_doctype = self.__seen_doctype.lower()
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
elif rawdata[i] == '&':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
res = charref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in charref")
i = i-1
if not self.stack:
self.syntax_error('data not in content')
self.handle_charref(res.group('char')[:-1])
self.lineno = self.lineno + res.group(0).count('\n')
continue
res = entityref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in entityref")
i = i-1
name = res.group('name')
if self.__map_case:
name = name.lower()
if name in self.entitydefs:
self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
n = len(rawdata)
i = res.start(0)
else:
self.unknown_entityref(name)
self.lineno = self.lineno + res.group(0).count('\n')
continue
elif rawdata[i] == ']':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
if n-i < 3:
break
if cdataclose.match(rawdata, i):
self.syntax_error("bogus `]]>'")
self.handle_data(rawdata[i])
i = i+1
continue
else:
raise Error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
break
# end while
if i > 0:
self.__at_start = 0
if end and i < n:
data = rawdata[i]
self.syntax_error("bogus `%s'" % data)
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
self.rawdata = rawdata[i+1:]
return self.goahead(end)
self.rawdata = rawdata[i:]
if end:
if not self.__seen_starttag:
self.syntax_error('no elements in file')
if self.stack:
self.syntax_error('missing end tags')
while self.stack:
self.finish_endtag(self.stack[-1][0])
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
raise Error('unexpected call to handle_comment')
res = commentclose.search(rawdata, i+4)
if res is None:
return -1
if doubledash.search(rawdata, i+4, res.start(0)):
self.syntax_error("`--' inside comment")
if rawdata[res.start(0)-1] == '-':
self.syntax_error('comment cannot end in three dashes')
if not self.__accept_utf8 and \
illegal.search(rawdata, i+4, res.start(0)):
self.syntax_error('illegal character in comment')
self.handle_comment(rawdata[i+4: res.start(0)])
return res.end(0)
# Internal -- handle DOCTYPE tag, return length or -1 if not terminated
def parse_doctype(self, res):
rawdata = self.rawdata
n = len(rawdata)
name = res.group('name')
if self.__map_case:
name = name.lower()
pubid, syslit = res.group('pubid', 'syslit')
if pubid is not None:
pubid = pubid[1:-1] # remove quotes
pubid = ' '.join(pubid.split()) # normalize
if syslit is not None: syslit = syslit[1:-1] # remove quotes
j = k = res.end(0)
if k >= n:
return -1
if rawdata[k] == '[':
level = 0
k = k+1
dq = sq = 0
while k < n:
c = rawdata[k]
if not sq and c == '"':
dq = not dq
elif not dq and c == "'":
sq = not sq
elif sq or dq:
pass
elif level <= 0 and c == ']':
res = endbracket.match(rawdata, k+1)
if res is None:
return -1
self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
return res.end(0)
elif c == '<':
level = level + 1
elif c == '>':
level = level - 1
if level < 0:
self.syntax_error("bogus `>' in DOCTYPE")
k = k+1
res = endbracketfind.match(rawdata, k)
if res is None:
return -1
if endbracket.match(rawdata, k) is None:
self.syntax_error('garbage in DOCTYPE')
self.handle_doctype(name, pubid, syslit, None)
return res.end(0)
# Internal -- handle CDATA tag, return length or -1 if not terminated
def parse_cdata(self, i):
rawdata = self.rawdata
if rawdata[i:i+9] != '<![CDATA[':
raise Error('unexpected call to parse_cdata')
res = cdataclose.search(rawdata, i+9)
if res is None:
return -1
if not self.__accept_utf8 and \
illegal.search(rawdata, i+9, res.start(0)):
self.syntax_error('illegal character in CDATA')
if not self.stack:
self.syntax_error('CDATA not in content')
self.handle_cdata(rawdata[i+9:res.start(0)])
return res.end(0)
__xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
# Internal -- handle a processing instruction tag
def parse_proc(self, i):
rawdata = self.rawdata
end = procclose.search(rawdata, i)
if end is None:
return -1
j = end.start(0)
if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
self.syntax_error('illegal character in processing instruction')
res = tagfind.match(rawdata, i+2)
if res is None:
raise Error('unexpected call to parse_proc')
k = res.end(0)
name = res.group(0)
if self.__map_case:
name = name.lower()
if name == 'xml:namespace':
self.syntax_error('old-fashioned namespace declaration')
self.__use_namespaces = -1
# namespace declaration
# this must come after the <?xml?> declaration (if any)
# and before the <!DOCTYPE> (if any).
if self.__seen_doctype or self.__seen_starttag:
self.syntax_error('xml:namespace declaration too late in document')
attrdict, namespace, k = self.parse_attributes(name, k, j)
if namespace:
self.syntax_error('namespace declaration inside namespace declaration')
for attrname in attrdict.keys():
if not attrname in self.__xml_namespace_attributes:
self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
if not 'ns' in attrdict or not 'prefix' in attrdict:
self.syntax_error('xml:namespace without required attributes')
prefix = attrdict.get('prefix')
if ncname.match(prefix) is None:
self.syntax_error('xml:namespace illegal prefix value')
return end.end(0)
if prefix in self.__namespaces:
self.syntax_error('xml:namespace prefix not unique')
self.__namespaces[prefix] = attrdict['ns']
else:
if name.lower() == 'xml':
self.syntax_error('illegal processing instruction target name')
self.handle_proc(name, rawdata[k:j])
return end.end(0)
# Internal -- parse attributes between i and j
def parse_attributes(self, tag, i, j):
rawdata = self.rawdata
attrdict = {}
namespace = {}
while i < j:
res = attrfind.match(rawdata, i)
if res is None:
break
attrname, attrvalue = res.group('name', 'value')
if self.__map_case:
attrname = attrname.lower()
i = res.end(0)
if attrvalue is None:
self.syntax_error("no value specified for attribute `%s'" % attrname)
attrvalue = attrname
elif attrvalue[:1] == "'" == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
elif not self.__accept_unquoted_attributes:
self.syntax_error("attribute `%s' value not quoted" % attrname)
res = xmlns.match(attrname)
if res is not None:
# namespace declaration
ncname = res.group('ncname')
namespace[ncname or ''] = attrvalue or None
if not self.__use_namespaces:
self.__use_namespaces = len(self.stack)+1
continue
if '<' in attrvalue:
self.syntax_error("`<' illegal in attribute value")
if attrname in attrdict:
self.syntax_error("attribute `%s' specified twice" % attrname)
attrvalue = attrvalue.translate(attrtrans)
attrdict[attrname] = self.translate_references(attrvalue)
return attrdict, namespace, i
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
rawdata = self.rawdata
# i points to start of tag
end = endbracketfind.match(rawdata, i+1)
if end is None:
return -1
tag = starttagmatch.match(rawdata, i)
if tag is None or tag.end(0) != end.end(0):
self.syntax_error('garbage in starttag')
return end.end(0)
nstag = tagname = tag.group('tagname')
if self.__map_case:
nstag = tagname = nstag.lower()
if not self.__seen_starttag and self.__seen_doctype and \
tagname != self.__seen_doctype:
self.syntax_error('starttag does not match DOCTYPE')
if self.__seen_starttag and not self.stack:
self.syntax_error('multiple elements on top level')
k, j = tag.span('attrs')
attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
self.stack.append((tagname, nsdict, nstag))
if self.__use_namespaces:
res = qname.match(tagname)
else:
res = None
if res is not None:
prefix, nstag = res.group('prefix', 'local')
if prefix is None:
prefix = ''
ns = None
for t, d, nst in self.stack:
if prefix in d:
ns = d[prefix]
if ns is None and prefix != '':
ns = self.__namespaces.get(prefix)
if ns is not None:
nstag = ns + ' ' + nstag
elif prefix != '':
nstag = prefix + ':' + nstag # undo split
self.stack[-1] = tagname, nsdict, nstag
# translate namespace of attributes
attrnamemap = {} # map from new name to old name (used for error reporting)
for key in attrdict.keys():
attrnamemap[key] = key
if self.__use_namespaces:
nattrdict = {}
for key, val in attrdict.items():
okey = key
res = qname.match(key)
if res is not None:
aprefix, key = res.group('prefix', 'local')
if self.__map_case:
key = key.lower()
if aprefix is not None:
ans = None
for t, d, nst in self.stack:
if aprefix in d:
ans = d[aprefix]
if ans is None:
ans = self.__namespaces.get(aprefix)
if ans is not None:
key = ans + ' ' + key
else:
key = aprefix + ':' + key
nattrdict[key] = val
attrnamemap[key] = okey
attrdict = nattrdict
attributes = self.attributes.get(nstag)
if attributes is not None:
for key in attrdict.keys():
if not key in attributes:
self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
for key, val in attributes.items():
if val is not None and not key in attrdict:
attrdict[key] = val
method = self.elements.get(nstag, (None, None))[0]
self.finish_starttag(nstag, attrdict, method)
if tag.group('slash') == '/':
self.finish_endtag(tagname)
return tag.end(0)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
end = endbracketfind.match(rawdata, i+1)
if end is None:
return -1
res = tagfind.match(rawdata, i+2)
if res is None:
if self.literal:
self.handle_data(rawdata[i])
return i+1
if not self.__accept_missing_endtag_name:
self.syntax_error('no name specified in end tag')
tag = self.stack[-1][0]
k = i+2
else:
tag = res.group(0)
if self.__map_case:
tag = tag.lower()
if self.literal:
if not self.stack or tag != self.stack[-1][0]:
self.handle_data(rawdata[i])
return i+1
k = res.end(0)
if endbracket.match(rawdata, k) is None:
self.syntax_error('garbage in end tag')
self.finish_endtag(tag)
return end.end(0)
# Internal -- finish processing of start tag
def finish_starttag(self, tagname, attrdict, method):
if method is not None:
self.handle_starttag(tagname, method, attrdict)
else:
self.unknown_starttag(tagname, attrdict)
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
self.literal = 0
if not tag:
self.syntax_error('name-less end tag')
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
found = -1
for i in range(len(self.stack)):
if tag == self.stack[i][0]:
found = i
if found == -1:
self.syntax_error('unopened end tag')
return
while len(self.stack) > found:
if found < len(self.stack) - 1:
self.syntax_error('missing close tag for %s' % self.stack[-1][2])
nstag = self.stack[-1][2]
method = self.elements.get(nstag, (None, None))[1]
if method is not None:
self.handle_endtag(nstag, method)
else:
self.unknown_endtag(nstag)
if self.__use_namespaces == len(self.stack):
self.__use_namespaces = 0
del self.stack[-1]
# Overridable -- handle xml processing instruction
def handle_xml(self, encoding, standalone):
pass
# Overridable -- handle DOCTYPE
def handle_doctype(self, tag, pubid, syslit, data):
pass
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- handle character reference, no need to override
def handle_charref(self, name):
try:
if name[0] == 'x':
n = int(name[1:], 16)
else:
n = int(name)
except ValueError:
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
# Definition of entities -- derived classes may override
entitydefs = {'lt': '<', # must use charref
'gt': '>',
'amp': '&', # must use charref
'quot': '"',
'apos': ''',
}
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle cdata, could be overridden
def handle_cdata(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle processing instructions, could be overridden
def handle_proc(self, name, data):
pass
# Example -- handle relatively harmless syntax errors, could be overridden
def syntax_error(self, message):
raise Error('Syntax error at line %d: %s' % (self.lineno, message))
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, name):
self.syntax_error("reference to unknown entity `&%s;'" % name)
class TestXMLParser(XMLParser):
def __init__(self, **kw):
self.testdata = ""
XMLParser.__init__(self, **kw)
def handle_xml(self, encoding, standalone):
self.flush()
print 'xml: encoding =',encoding,'standalone =',standalone
def handle_doctype(self, tag, pubid, syslit, data):
self.flush()
print 'DOCTYPE:',tag, repr(data)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_cdata(self, data):
self.flush()
print 'cdata:', repr(data)
def handle_proc(self, name, data):
self.flush()
print 'processing:',name,repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def syntax_error(self, message):
print 'error at line %d:' % self.lineno, message
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs.items():
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def close(self):
XMLParser.close(self)
self.flush()
def test(args = None):
import sys, getopt
from time import time
if not args:
args = sys.argv[1:]
opts, args = getopt.getopt(args, 'st')
klass = TestXMLParser
do_time = 0
for o, a in opts:
if o == '-s':
klass = XMLParser
elif o == '-t':
do_time = 1
if args:
file = args[0]
else:
file = 'test.xml'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
t0 = time()
try:
if do_time:
x.feed(data)
x.close()
else:
for c in data:
x.feed(c)
x.close()
except Error, msg:
t1 = time()
print msg
if do_time:
print 'total time: %g' % (t1-t0)
sys.exit(1)
t1 = time()
if do_time:
print 'total time: %g' % (t1-t0)
if __name__ == '__main__':
test()
| {
"repo_name": "salguarnieri/intellij-community",
"path": "python/lib/Lib/xmllib.py",
"copies": "160",
"size": "34848",
"license": "apache-2.0",
"hash": 3061496553857210000,
"line_mean": 36.5113024758,
"line_max": 109,
"alpha_frac": 0.4841023875,
"autogenerated": false,
"ratio": 4.1021777516185995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A parser for XML, using the derived class as static DTD."""
# Author: Sjoerd Mullender.
import re
import string
version = '0.3'
class Error(RuntimeError):
pass
# Regular expressions used for parsing
_S = '[ \t\r\n]+' # white space
_opS = '[ \t\r\n]*' # optional white space
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*' # valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")" # quoted XML string
illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
interesting = re.compile('[]&<]')
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')
attrfind = re.compile(
_S + '(?P<name>' + _Name + ')'
'(' + _opS + '=' + _opS +
'(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
'(?P<attrs>(?:'+attrfind.pattern+')*)'+
starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# this matches one of the following:
# SYSTEM SystemLiteral
# PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>'+_QStr+')'
_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
"'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
_ExternalId = '(?:SYSTEM|' \
'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
')'+_S+_SystemLiteral%'syslit'
doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
'(?:'+_S+_ExternalId+')?'+_opS)
xmldecl = re.compile('<\?xml'+_S+
'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
'(?:'+_S+'encoding'+_opS+'='+_opS+
"(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
'"[A-Za-z][-A-Za-z0-9._]*"))?'
'(?:'+_S+'standalone'+_opS+'='+_opS+
'(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
_opS+'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrtrans = string.maketrans(' \r\n\t', ' ')
# definitions for XML namespaces
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*' # XML Name, minus the ":"
ncname = re.compile(_NCName + '$')
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
'(?P<local>' + _NCName + ')$')
xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks).
class XMLParser:
attributes = {} # default, to be overridden
elements = {} # default, to be overridden
# parsing options, settable using keyword args in __init__
__accept_unquoted_attributes = 0
__accept_missing_endtag_name = 0
__map_case = 0
__accept_utf8 = 0
__translate_attribute_references = 1
# Interface -- initialize and reset this instance
def __init__(self, **kw):
self.__fixed = 0
if kw.has_key('accept_unquoted_attributes'):
self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
if kw.has_key('accept_missing_endtag_name'):
self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
if kw.has_key('map_case'):
self.__map_case = kw['map_case']
if kw.has_key('accept_utf8'):
self.__accept_utf8 = kw['accept_utf8']
if kw.has_key('translate_attribute_references'):
self.__translate_attribute_references = kw['translate_attribute_references']
self.reset()
def __fixelements(self):
self.__fixed = 1
self.elements = {}
self.__fixdict(self.__dict__)
self.__fixclass(self.__class__)
def __fixclass(self, kl):
self.__fixdict(kl.__dict__)
for k in kl.__bases__:
self.__fixclass(k)
def __fixdict(self, dict):
for key in dict.keys():
if key[:6] == 'start_':
tag = key[6:]
start, end = self.elements.get(tag, (None, None))
if start is None:
self.elements[tag] = getattr(self, key), end
elif key[:4] == 'end_':
tag = key[4:]
start, end = self.elements.get(tag, (None, None))
if end is None:
self.elements[tag] = start, getattr(self, key)
# Interface -- reset this instance. Loses all unprocessed data
def reset(self):
self.rawdata = ''
self.stack = []
self.nomoretags = 0
self.literal = 0
self.lineno = 1
self.__at_start = 1
self.__seen_doctype = None
self.__seen_starttag = 0
self.__use_namespaces = 0
self.__namespaces = {'xml':None} # xml is implicitly declared
# backward compatibility hack: if elements not overridden,
# fill it in ourselves
if self.elements is XMLParser.elements:
self.__fixelements()
# For derived classes only -- enter literal mode (CDATA) till EOF
def setnomoretags(self):
self.nomoretags = self.literal = 1
# For derived classes only -- enter literal mode (CDATA)
def setliteral(self, *args):
self.literal = 1
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
def feed(self, data):
self.rawdata = self.rawdata + data
self.goahead(0)
# Interface -- handle the remaining data
def close(self):
self.goahead(1)
if self.__fixed:
self.__fixed = 0
# remove self.elements so that we don't leak
del self.elements
# Interface -- translate references
def translate_references(self, data, all = 1):
if not self.__translate_attribute_references:
return data
i = 0
while 1:
res = amp.search(data, i)
if res is None:
return data
s = res.start(0)
res = ref.match(data, s)
if res is None:
self.syntax_error("bogus `&'")
i = s+1
continue
i = res.end(0)
str = res.group(1)
rescan = 0
if str[0] == '#':
if str[1] == 'x':
str = chr(int(str[2:], 16))
else:
str = chr(int(str[1:]))
if data[i - 1] != ';':
self.syntax_error("`;' missing after char reference")
i = i-1
elif all:
if self.entitydefs.has_key(str):
str = self.entitydefs[str]
rescan = 1
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
else:
self.syntax_error("reference to unknown entity `&%s;'" % str)
str = '&' + str + ';'
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
# when we get here, str contains the translated text and i points
# to the end of the string that is to be replaced
data = data[:s] + str + data[i:]
if rescan:
i = s
else:
i = s + len(str)
# Interface - return a dictionary of all namespaces currently valid
def getnamespace(self):
nsdict = {}
for t, d, nst in self.stack:
nsdict.update(d)
return nsdict
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if i > 0:
self.__at_start = 0
if self.nomoretags:
data = rawdata[i:n]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = n
break
res = interesting.search(rawdata, i)
if res:
j = res.start(0)
else:
j = n
if i < j:
data = rawdata[i:j]
if self.__at_start and space.match(data) is None:
self.syntax_error('illegal data at start of file')
self.__at_start = 0
if not self.stack and space.match(data) is None:
self.syntax_error('data not in content')
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
self.__seen_starttag = 1
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if commentopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_comment(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if cdataopen.match(rawdata, i):
k = self.parse_cdata(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = xmldecl.match(rawdata, i)
if res:
if not self.__at_start:
self.syntax_error("<?xml?> declaration not at start of document")
version, encoding, standalone = res.group('version',
'encoding',
'standalone')
if version[1:-1] != '1.0':
raise Error('only XML version 1.0 supported')
if encoding: encoding = encoding[1:-1]
if standalone: standalone = standalone[1:-1]
self.handle_xml(encoding, standalone)
i = res.end(0)
continue
res = procopen.match(rawdata, i)
if res:
k = self.parse_proc(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = doctype.match(rawdata, i)
if res:
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
if self.__seen_doctype:
self.syntax_error('multiple DOCTYPE elements')
if self.__seen_starttag:
self.syntax_error('DOCTYPE not at beginning of document')
k = self.parse_doctype(res)
if k < 0: break
self.__seen_doctype = res.group('name')
if self.__map_case:
self.__seen_doctype = self.__seen_doctype.lower()
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
elif rawdata[i] == '&':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
res = charref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in charref")
i = i-1
if not self.stack:
self.syntax_error('data not in content')
self.handle_charref(res.group('char')[:-1])
self.lineno = self.lineno + res.group(0).count('\n')
continue
res = entityref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in entityref")
i = i-1
name = res.group('name')
if self.__map_case:
name = name.lower()
if self.entitydefs.has_key(name):
self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
n = len(rawdata)
i = res.start(0)
else:
self.unknown_entityref(name)
self.lineno = self.lineno + res.group(0).count('\n')
continue
elif rawdata[i] == ']':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
if n-i < 3:
break
if cdataclose.match(rawdata, i):
self.syntax_error("bogus `]]>'")
self.handle_data(rawdata[i])
i = i+1
continue
else:
raise Error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
break
# end while
if i > 0:
self.__at_start = 0
if end and i < n:
data = rawdata[i]
self.syntax_error("bogus `%s'" % data)
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
self.rawdata = rawdata[i+1:]
return self.goahead(end)
self.rawdata = rawdata[i:]
if end:
if not self.__seen_starttag:
self.syntax_error('no elements in file')
if self.stack:
self.syntax_error('missing end tags')
while self.stack:
self.finish_endtag(self.stack[-1][0])
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
raise Error('unexpected call to handle_comment')
res = commentclose.search(rawdata, i+4)
if res is None:
return -1
if doubledash.search(rawdata, i+4, res.start(0)):
self.syntax_error("`--' inside comment")
if rawdata[res.start(0)-1] == '-':
self.syntax_error('comment cannot end in three dashes')
if not self.__accept_utf8 and \
illegal.search(rawdata, i+4, res.start(0)):
self.syntax_error('illegal character in comment')
self.handle_comment(rawdata[i+4: res.start(0)])
return res.end(0)
    # Internal -- handle DOCTYPE tag, return length or -1 if not terminated
    def parse_doctype(self, res):
        """Parse a <!DOCTYPE ...> declaration already matched by *res*.

        *res* is the match object produced by the module-level ``doctype``
        regex.  Dispatches to handle_doctype() and returns the index just
        past the declaration, or -1 if more data is needed.
        """
        rawdata = self.rawdata
        n = len(rawdata)
        name = res.group('name')
        if self.__map_case:
            name = name.lower()
        pubid, syslit = res.group('pubid', 'syslit')
        if pubid is not None:
            pubid = pubid[1:-1] # remove quotes
            pubid = ' '.join(pubid.split()) # normalize
        if syslit is not None: syslit = syslit[1:-1] # remove quotes
        j = k = res.end(0)
        if k >= n:
            # Declaration extends past the buffered data so far.
            return -1
        if rawdata[k] == '[':
            # Internal DTD subset: scan to the matching ']', tracking
            # single/double-quote state and '<...>' nesting depth.
            level = 0
            k = k+1
            dq = sq = 0
            while k < n:
                c = rawdata[k]
                if not sq and c == '"':
                    dq = not dq
                elif not dq and c == "'":
                    sq = not sq
                elif sq or dq:
                    pass
                elif level <= 0 and c == ']':
                    res = endbracket.match(rawdata, k+1)
                    if res is None:
                        return -1
                    self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
                    return res.end(0)
                elif c == '<':
                    level = level + 1
                elif c == '>':
                    level = level - 1
                    if level < 0:
                        self.syntax_error("bogus `>' in DOCTYPE")
                k = k+1
        res = endbracketfind.match(rawdata, k)
        if res is None:
            return -1
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in DOCTYPE')
        self.handle_doctype(name, pubid, syslit, None)
        return res.end(0)
    # Internal -- handle CDATA tag, return length or -1 if not terminated
    def parse_cdata(self, i):
        """Parse a '<![CDATA[...]]>' section starting at index *i*."""
        rawdata = self.rawdata
        if rawdata[i:i+9] != '<![CDATA[':
            raise Error('unexpected call to parse_cdata')
        res = cdataclose.search(rawdata, i+9)
        if res is None:
            # ']]>' not seen yet; need more data.
            return -1
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+9, res.start(0)):
            self.syntax_error('illegal character in CDATA')
        if not self.stack:
            self.syntax_error('CDATA not in content')
        self.handle_cdata(rawdata[i+9:res.start(0)])
        return res.end(0)
    # Attributes permitted on an old-style <?xml:namespace ...?> declaration.
    __xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
    # Internal -- handle a processing instruction tag
    def parse_proc(self, i):
        """Parse a processing instruction '<?target ...?>' at index *i*.

        Also recognizes the old-fashioned '<?xml:namespace ...?>'
        declaration and records its prefix in the namespace table.
        Returns the index just past '?>' or -1 if not terminated.
        """
        rawdata = self.rawdata
        end = procclose.search(rawdata, i)
        if end is None:
            return -1
        j = end.start(0)
        if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
            self.syntax_error('illegal character in processing instruction')
        res = tagfind.match(rawdata, i+2)
        if res is None:
            raise Error('unexpected call to parse_proc')
        k = res.end(0)
        name = res.group(0)
        if self.__map_case:
            name = name.lower()
        if name == 'xml:namespace':
            self.syntax_error('old-fashioned namespace declaration')
            self.__use_namespaces = -1
            # namespace declaration
            # this must come after the <?xml?> declaration (if any)
            # and before the <!DOCTYPE> (if any).
            if self.__seen_doctype or self.__seen_starttag:
                self.syntax_error('xml:namespace declaration too late in document')
            attrdict, namespace, k = self.parse_attributes(name, k, j)
            if namespace:
                self.syntax_error('namespace declaration inside namespace declaration')
            for attrname in attrdict.keys():
                if not self.__xml_namespace_attributes.has_key(attrname):
                    self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
            if not attrdict.has_key('ns') or not attrdict.has_key('prefix'):
                self.syntax_error('xml:namespace without required attributes')
            prefix = attrdict.get('prefix')
            if ncname.match(prefix) is None:
                self.syntax_error('xml:namespace illegal prefix value')
                return end.end(0)
            if self.__namespaces.has_key(prefix):
                self.syntax_error('xml:namespace prefix not unique')
            self.__namespaces[prefix] = attrdict['ns']
        else:
            if name.lower() == 'xml':
                self.syntax_error('illegal processing instruction target name')
            self.handle_proc(name, rawdata[k:j])
        return end.end(0)
    # Internal -- parse attributes between i and j
    def parse_attributes(self, tag, i, j):
        """Parse attribute specifications of *tag* in rawdata[i:j].

        Returns (attrdict, namespace, i): attrdict maps attribute names
        to reference-translated values, namespace maps prefixes declared
        via xmlns attributes to URIs, and i is where scanning stopped.
        """
        rawdata = self.rawdata
        attrdict = {}
        namespace = {}
        while i < j:
            res = attrfind.match(rawdata, i)
            if res is None:
                break
            attrname, attrvalue = res.group('name', 'value')
            if self.__map_case:
                attrname = attrname.lower()
            i = res.end(0)
            if attrvalue is None:
                # Minimized attribute: fall back to the name as its value.
                self.syntax_error("no value specified for attribute `%s'" % attrname)
                attrvalue = attrname
            elif attrvalue[:1] == "'" == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            elif not self.__accept_unquoted_attributes:
                self.syntax_error("attribute `%s' value not quoted" % attrname)
            res = xmlns.match(attrname)
            if res is not None:
                # namespace declaration
                ncname = res.group('ncname')
                namespace[ncname or ''] = attrvalue or None
                if not self.__use_namespaces:
                    self.__use_namespaces = len(self.stack)+1
                continue
            if '<' in attrvalue:
                self.syntax_error("`<' illegal in attribute value")
            if attrdict.has_key(attrname):
                self.syntax_error("attribute `%s' specified twice" % attrname)
            attrvalue = attrvalue.translate(attrtrans)
            attrdict[attrname] = self.translate_references(attrvalue)
        return attrdict, namespace, i
    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at index *i*; return end index or -1."""
        rawdata = self.rawdata
        # i points to start of tag
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        tag = starttagmatch.match(rawdata, i)
        if tag is None or tag.end(0) != end.end(0):
            self.syntax_error('garbage in starttag')
            return end.end(0)
        nstag = tagname = tag.group('tagname')
        if self.__map_case:
            nstag = tagname = nstag.lower()
        if not self.__seen_starttag and self.__seen_doctype and \
                tagname != self.__seen_doctype:
            self.syntax_error('starttag does not match DOCTYPE')
        if self.__seen_starttag and not self.stack:
            self.syntax_error('multiple elements on top level')
        k, j = tag.span('attrs')
        attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
        self.stack.append((tagname, nsdict, nstag))
        if self.__use_namespaces:
            res = qname.match(tagname)
        else:
            res = None
        if res is not None:
            # Resolve the tag's prefix against declarations on the open
            # element stack (innermost wins), then the global table.
            prefix, nstag = res.group('prefix', 'local')
            if prefix is None:
                prefix = ''
            ns = None
            for t, d, nst in self.stack:
                if d.has_key(prefix):
                    ns = d[prefix]
            if ns is None and prefix != '':
                ns = self.__namespaces.get(prefix)
            if ns is not None:
                nstag = ns + ' ' + nstag
            elif prefix != '':
                nstag = prefix + ':' + nstag # undo split
            self.stack[-1] = tagname, nsdict, nstag
        # translate namespace of attributes
        attrnamemap = {} # map from new name to old name (used for error reporting)
        for key in attrdict.keys():
            attrnamemap[key] = key
        if self.__use_namespaces:
            nattrdict = {}
            for key, val in attrdict.items():
                okey = key
                res = qname.match(key)
                if res is not None:
                    aprefix, key = res.group('prefix', 'local')
                    if self.__map_case:
                        key = key.lower()
                    if aprefix is None:
                        aprefix = ''
                    ans = None
                    for t, d, nst in self.stack:
                        if d.has_key(aprefix):
                            ans = d[aprefix]
                    if ans is None and aprefix != '':
                        ans = self.__namespaces.get(aprefix)
                    if ans is not None:
                        key = ans + ' ' + key
                    elif aprefix != '':
                        key = aprefix + ':' + key
                    elif ns is not None:
                        key = ns + ' ' + key
                nattrdict[key] = val
                attrnamemap[key] = okey
            attrdict = nattrdict
        attributes = self.attributes.get(nstag)
        if attributes is not None:
            # Validate against declared attributes and fill in defaults.
            for key in attrdict.keys():
                if not attributes.has_key(key):
                    self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
            for key, val in attributes.items():
                if val is not None and not attrdict.has_key(key):
                    attrdict[key] = val
        method = self.elements.get(nstag, (None, None))[0]
        self.finish_starttag(nstag, attrdict, method)
        if tag.group('slash') == '/':
            # Empty-element form: <tag/>.
            self.finish_endtag(tagname)
        return tag.end(0)
    # Internal -- parse endtag
    def parse_endtag(self, i):
        """Parse an end tag at index *i*; return end index or -1."""
        rawdata = self.rawdata
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        res = tagfind.match(rawdata, i+2)
        if res is None:
            # '</' not followed by a name.
            if self.literal:
                self.handle_data(rawdata[i])
                return i+1
            if not self.__accept_missing_endtag_name:
                self.syntax_error('no name specified in end tag')
            tag = self.stack[-1][0]
            k = i+2
        else:
            tag = res.group(0)
            if self.__map_case:
                tag = tag.lower()
            if self.literal:
                # In literal (CDATA) mode only the matching end tag counts.
                if not self.stack or tag != self.stack[-1][0]:
                    self.handle_data(rawdata[i])
                    return i+1
            k = res.end(0)
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in end tag')
        self.finish_endtag(tag)
        return end.end(0)
# Internal -- finish processing of start tag
def finish_starttag(self, tagname, attrdict, method):
if method is not None:
self.handle_starttag(tagname, method, attrdict)
else:
self.unknown_starttag(tagname, attrdict)
    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        """Pop the element stack down to *tag*, emitting end-tag events.

        NOTE(review): unlike stock xmllib, the stack search below is not
        nested under the name-less-tag branch; with a named tag and an
        empty stack this calls unknown_endtag() rather than reporting an
        'unopened end tag' -- confirm this flattening is intentional.
        """
        self.literal = 0
        if not tag:
            self.syntax_error('name-less end tag')
        found = len(self.stack) - 1
        if found < 0:
            self.unknown_endtag(tag)
            return
        else:
            found = -1
            for i in range(len(self.stack)):
                if tag == self.stack[i][0]:
                    found = i
            if found == -1:
                self.syntax_error('unopened end tag')
                return
        while len(self.stack) > found:
            # Implicitly close any elements left open above the target.
            if found < len(self.stack) - 1:
                self.syntax_error('missing close tag for %s' % self.stack[-1][2])
            nstag = self.stack[-1][2]
            method = self.elements.get(nstag, (None, None))[1]
            if method is not None:
                self.handle_endtag(nstag, method)
            else:
                self.unknown_endtag(nstag)
            if self.__use_namespaces == len(self.stack):
                self.__use_namespaces = 0
            del self.stack[-1]
    # Overridable -- handle xml processing instruction
    def handle_xml(self, encoding, standalone):
        """Called for the <?xml ...?> declaration; default does nothing."""
        pass
    # Overridable -- handle DOCTYPE
    def handle_doctype(self, tag, pubid, syslit, data):
        """Called for <!DOCTYPE ...>; default does nothing."""
        pass
    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        """Invoke the registered start_<tag> handler."""
        method(attrs)
    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        """Invoke the registered end_<tag> handler."""
        method()
    # Example -- handle character reference, no need to override
    def handle_charref(self, name):
        """Decode a character reference ('nn' or 'xhh') into data."""
        try:
            if name[0] == 'x':
                n = int(name[1:], 16)
            else:
                n = int(name)
        except ValueError:
            self.unknown_charref(name)
            return
        if not 0 <= n <= 255:
            # Only Latin-1 code points can be produced via chr() here.
            self.unknown_charref(name)
            return
        self.handle_data(chr(n))
# Definition of entities -- derived classes may override
entitydefs = {'lt': '<', # must use charref
'gt': '>',
'amp': '&', # must use charref
'quot': '"',
'apos': ''',
}
    # Example -- handle data, should be overridden
    def handle_data(self, data):
        """Called with character data; default discards it."""
        pass
    # Example -- handle cdata, could be overridden
    def handle_cdata(self, data):
        """Called with CDATA section contents; default discards it."""
        pass
    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        """Called with comment contents; default discards it."""
        pass
    # Example -- handle processing instructions, could be overridden
    def handle_proc(self, name, data):
        """Called with a processing instruction; default discards it."""
        pass
    # Example -- handle relatively harmless syntax errors, could be overridden
    def syntax_error(self, message):
        """Report a recoverable syntax error; default raises Error."""
        raise Error('Syntax error at line %d: %s' % (self.lineno, message))
    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, name):
        """Default: treat an unknown entity reference as a syntax error."""
        self.syntax_error("reference to unknown entity `&%s;'" % name)
class TestXMLParser(XMLParser):
    """Demonstration parser: echoes each parser event to stdout."""
    def __init__(self, **kw):
        self.testdata = ""
        apply(XMLParser.__init__, (self,), kw)
    def handle_xml(self, encoding, standalone):
        self.flush()
        print 'xml: encoding =',encoding,'standalone =',standalone
    def handle_doctype(self, tag, pubid, syslit, data):
        self.flush()
        print 'DOCTYPE:',tag, `data`
    def handle_data(self, data):
        # Accumulate character data; flush in chunks to keep lines short.
        self.testdata = self.testdata + data
        if len(`self.testdata`) >= 70:
            self.flush()
    def flush(self):
        # Print and reset any buffered character data.
        data = self.testdata
        if data:
            self.testdata = ""
            print 'data:', `data`
    def handle_cdata(self, data):
        self.flush()
        print 'cdata:', `data`
    def handle_proc(self, name, data):
        self.flush()
        print 'processing:',name,`data`
    def handle_comment(self, data):
        self.flush()
        r = `data`
        if len(r) > 68:
            r = r[:32] + '...' + r[-32:]
        print 'comment:', r
    def syntax_error(self, message):
        # Report but do not raise, so parsing continues past errors.
        print 'error at line %d:' % self.lineno, message
    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print 'start tag: <' + tag + '>'
        else:
            print 'start tag: <' + tag,
            for name, value in attrs.items():
                print name + '=' + '"' + value + '"',
            print '>'
    def unknown_endtag(self, tag):
        self.flush()
        print 'end tag: </' + tag + '>'
    def unknown_entityref(self, ref):
        self.flush()
        print '*** unknown entity ref: &' + ref + ';'
    def unknown_charref(self, ref):
        self.flush()
        print '*** unknown char ref: &#' + ref + ';'
    def close(self):
        XMLParser.close(self)
        self.flush()
def test(args = None):
    """Command-line driver: parse a file (default 'test.xml').

    Options: -s use the silent base XMLParser instead of TestXMLParser;
    -t time a single bulk feed instead of feeding char-by-char.
    A filename of '-' reads standard input.
    """
    import sys, getopt
    from time import time
    if not args:
        args = sys.argv[1:]
    opts, args = getopt.getopt(args, 'st')
    klass = TestXMLParser
    do_time = 0
    for o, a in opts:
        if o == '-s':
            klass = XMLParser
        elif o == '-t':
            do_time = 1
    if args:
        file = args[0]
    else:
        file = 'test.xml'
    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError, msg:
            print file, ":", msg
            sys.exit(1)
    data = f.read()
    if f is not sys.stdin:
        f.close()
    x = klass()
    t0 = time()
    try:
        if do_time:
            # Bulk feed: exercises throughput.
            x.feed(data)
            x.close()
        else:
            # Char-at-a-time feed: exercises incremental parsing.
            for c in data:
                x.feed(c)
            x.close()
    except Error, msg:
        t1 = time()
        print msg
        if do_time:
            print 'total time: %g' % (t1-t0)
        sys.exit(1)
    t1 = time()
    if do_time:
        print 'total time: %g' % (t1-t0)
if __name__ == '__main__':
    test()
| {
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"path": "Lib/xmllib.py",
"copies": "5",
"size": "34920",
"license": "mit",
"hash": 4782206203279085000,
"line_mean": 36.5888051668,
"line_max": 109,
"alpha_frac": 0.4829037801,
"autogenerated": false,
"ratio": 4.087078651685394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021803774711565724,
"num_lines": 929
} |
"""A parser for XML, using the derived class as static DTD."""
# Author: Sjoerd Mullender.
import re
import string
version = '0.3'
# Regular expressions used for parsing
_S = '[ \t\r\n]+' # white space
_opS = '[ \t\r\n]*' # optional white space
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*' # valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")" # quoted XML string
illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
interesting = re.compile('[]&<]')
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')
attrfind = re.compile(
_S + '(?P<name>' + _Name + ')'
'(' + _opS + '=' + _opS +
'(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
'(?P<attrs>(?:'+attrfind.pattern+')*)'+
starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# this matches one of the following:
# SYSTEM SystemLiteral
# PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>'+_QStr+')'
_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
"'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
_ExternalId = '(?:SYSTEM|' \
'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
')'+_S+_SystemLiteral%'syslit'
doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
'(?:'+_S+_ExternalId+')?'+_opS)
xmldecl = re.compile('<\?xml'+_S+
'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
'(?:'+_S+'encoding'+_opS+'='+_opS+
"(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
'"[A-Za-z][-A-Za-z0-9._]*"))?'
'(?:'+_S+'standalone'+_opS+'='+_opS+
'(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
_opS+'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrtrans = string.maketrans(' \r\n\t', ' ')
# definitions for XML namespaces
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*' # XML Name, minus the ":"
ncname = re.compile(_NCName + '$')
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
'(?P<local>' + _NCName + ')$')
xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks).
class XMLParser:
    attributes = {} # default, to be overridden
    elements = {} # default, to be overridden
    # parsing options, settable using keyword args in __init__
    __accept_unquoted_attributes = 0 # allow attr=value without quotes
    __accept_missing_endtag_name = 0 # allow '</>' end tags
    __map_case = 0 # fold element/attribute names to lower case
    __accept_utf8 = 0 # disable the illegal-character content checks
    __translate_attribute_references = 1 # expand &refs; in attribute values
    # Interface -- initialize and reset this instance
    def __init__(self, **kw):
        """Create a parser; keyword arguments correspond to the option
        class attributes (accept_unquoted_attributes, map_case, ...)."""
        self.__fixed = 0
        if kw.has_key('accept_unquoted_attributes'):
            self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
        if kw.has_key('accept_missing_endtag_name'):
            self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
        if kw.has_key('map_case'):
            self.__map_case = kw['map_case']
        if kw.has_key('accept_utf8'):
            self.__accept_utf8 = kw['accept_utf8']
        if kw.has_key('translate_attribute_references'):
            self.__translate_attribute_references = kw['translate_attribute_references']
        self.reset()
    # Build self.elements from start_*/end_* methods (backward compat).
    def __fixelements(self):
        self.__fixed = 1
        self.elements = {}
        self.__fixdict(self.__dict__)
        self.__fixclass(self.__class__)
    # Recursively scan a class and its bases for handler methods.
    def __fixclass(self, kl):
        self.__fixdict(kl.__dict__)
        for k in kl.__bases__:
            self.__fixclass(k)
    # Register start_/end_ entries from one namespace dict; the first
    # definition found (instance, then MRO order) wins.
    def __fixdict(self, dict):
        for key in dict.keys():
            if key[:6] == 'start_':
                tag = key[6:]
                start, end = self.elements.get(tag, (None, None))
                if start is None:
                    self.elements[tag] = getattr(self, key), end
            elif key[:4] == 'end_':
                tag = key[4:]
                start, end = self.elements.get(tag, (None, None))
                if end is None:
                    self.elements[tag] = start, getattr(self, key)
    # Interface -- reset this instance. Loses all unprocessed data
    def reset(self):
        """Reset parser state; buffered unparsed data is discarded."""
        self.rawdata = ''
        self.stack = []
        self.nomoretags = 0
        self.literal = 0
        self.lineno = 1
        self.__at_start = 1
        self.__seen_doctype = None
        self.__seen_starttag = 0
        self.__use_namespaces = 0
        self.__namespaces = {'xml':None} # xml is implicitly declared
        # backward compatibility hack: if elements not overridden,
        # fill it in ourselves
        if self.elements is XMLParser.elements:
            self.__fixelements()
    # For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        self.nomoretags = self.literal = 1
    # For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        self.literal = 1
    # Interface -- feed some data to the parser. Call this as
    # often as you want, with as little or as much text as you
    # want (may include '\n'). (This just saves the text, all the
    # processing is done by goahead().)
    def feed(self, data):
        """Buffer *data* and process as much of it as possible."""
        self.rawdata = self.rawdata + data
        self.goahead(0)
    # Interface -- handle the remaining data
    def close(self):
        """Flush remaining buffered data through the parser as at EOF."""
        self.goahead(1)
        if self.__fixed:
            self.__fixed = 0
            # remove self.elements so that we don't leak
            del self.elements
    # Interface -- translate references
    def translate_references(self, data, all = 1):
        """Return *data* with character/entity references expanded.

        If *all* is false only character references are expanded (used
        on the result of an earlier entity expansion, which is rescanned
        in place).  Honors the translate_attribute_references option.
        """
        if not self.__translate_attribute_references:
            return data
        i = 0
        while 1:
            res = amp.search(data, i)
            if res is None:
                return data
            s = res.start(0)
            res = ref.match(data, s)
            if res is None:
                self.syntax_error("bogus `&'")
                i = s+1
                continue
            i = res.end(0)
            str = res.group(1)
            rescan = 0
            if str[0] == '#':
                # Character reference: decimal or hexadecimal form.
                if str[1] == 'x':
                    str = chr(string.atoi(str[2:], 16))
                else:
                    str = chr(string.atoi(str[1:]))
                if data[i - 1] != ';':
                    self.syntax_error("`;' missing after char reference")
                    i = i-1
            elif all:
                if self.entitydefs.has_key(str):
                    str = self.entitydefs[str]
                    rescan = 1
                elif data[i - 1] != ';':
                    self.syntax_error("bogus `&'")
                    i = s + 1 # just past the &
                    continue
                else:
                    self.syntax_error("reference to unknown entity `&%s;'" % str)
                    str = '&' + str + ';'
            elif data[i - 1] != ';':
                self.syntax_error("bogus `&'")
                i = s + 1 # just past the &
                continue
            # when we get here, str contains the translated text and i points
            # to the end of the string that is to be replaced
            data = data[:s] + str + data[i:]
            if rescan:
                i = s
            else:
                i = s + len(str)
# Interface - return a dictionary of all namespaces currently valid
def getnamespace(self):
nsdict = {}
for t, d, nst in self.stack:
nsdict.update(d)
return nsdict
    # Internal -- handle data as far as reasonable. May leave state
    # and data to be processed by a subsequent call. If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        """Main scan loop: dispatch on '<', '&', ']' and plain data."""
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if i > 0:
                self.__at_start = 0
            if self.nomoretags:
                # Literal-to-EOF mode: everything left is character data.
                data = rawdata[i:n]
                self.handle_data(data)
                self.lineno = self.lineno + string.count(data, '\n')
                i = n
                break
            res = interesting.search(rawdata, i)
            if res:
                j = res.start(0)
            else:
                j = n
            if i < j:
                # Plain character data up to the next markup character.
                data = rawdata[i:j]
                if self.__at_start and space.match(data) is None:
                    self.syntax_error('illegal data at start of file')
                self.__at_start = 0
                if not self.stack and space.match(data) is None:
                    self.syntax_error('data not in content')
                if not self.__accept_utf8 and illegal.search(data):
                    self.syntax_error('illegal character in content')
                self.handle_data(data)
                self.lineno = self.lineno + string.count(data, '\n')
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + string.count(data, '\n')
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    self.__seen_starttag = 1
                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                    i = k
                    continue
                if endtagopen.match(rawdata, i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                    i = k
                    continue
                if commentopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + string.count(data, '\n')
                        i = i+1
                        continue
                    k = self.parse_comment(i)
                    if k < 0: break
                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                    i = k
                    continue
                if cdataopen.match(rawdata, i):
                    k = self.parse_cdata(i)
                    if k < 0: break
                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                    i = k
                    continue
                res = xmldecl.match(rawdata, i)
                if res:
                    if not self.__at_start:
                        self.syntax_error("<?xml?> declaration not at start of document")
                    version, encoding, standalone = res.group('version',
                                                             'encoding',
                                                             'standalone')
                    if version[1:-1] != '1.0':
                        raise RuntimeError, 'only XML version 1.0 supported'
                    if encoding: encoding = encoding[1:-1]
                    if standalone: standalone = standalone[1:-1]
                    self.handle_xml(encoding, standalone)
                    i = res.end(0)
                    continue
                res = procopen.match(rawdata, i)
                if res:
                    k = self.parse_proc(i)
                    if k < 0: break
                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                    i = k
                    continue
                res = doctype.match(rawdata, i)
                if res:
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + string.count(data, '\n')
                        i = i+1
                        continue
                    if self.__seen_doctype:
                        self.syntax_error('multiple DOCTYPE elements')
                    if self.__seen_starttag:
                        self.syntax_error('DOCTYPE not at beginning of document')
                    k = self.parse_doctype(res)
                    if k < 0: break
                    self.__seen_doctype = res.group('name')
                    if self.__map_case:
                        self.__seen_doctype = string.lower(self.__seen_doctype)
                    self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                    i = k
                    continue
            elif rawdata[i] == '&':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                res = charref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in charref")
                        i = i-1
                    if not self.stack:
                        self.syntax_error('data not in content')
                    self.handle_charref(res.group('char')[:-1])
                    self.lineno = self.lineno + string.count(res.group(0), '\n')
                    continue
                res = entityref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in entityref")
                        i = i-1
                    name = res.group('name')
                    if self.__map_case:
                        name = string.lower(name)
                    if self.entitydefs.has_key(name):
                        # Splice the replacement text into the buffer and
                        # rescan it from the reference's start position.
                        self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
                        n = len(rawdata)
                        i = res.start(0)
                    else:
                        self.unknown_entityref(name)
                    self.lineno = self.lineno + string.count(res.group(0), '\n')
                    continue
            elif rawdata[i] == ']':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                if n-i < 3:
                    break
                if cdataclose.match(rawdata, i):
                    self.syntax_error("bogus `]]>'")
                self.handle_data(rawdata[i])
                i = i+1
                continue
            else:
                raise RuntimeError, 'neither < nor & ??'
            # We get here only if incomplete matches but
            # nothing else
            break
        # end while
        if i > 0:
            self.__at_start = 0
        if end and i < n:
            # Forced EOF with unparsable leftovers: emit one character as
            # data and recurse so the loop can make progress.
            data = rawdata[i]
            self.syntax_error("bogus `%s'" % data)
            if not self.__accept_utf8 and illegal.search(data):
                self.syntax_error('illegal character in content')
            self.handle_data(data)
            self.lineno = self.lineno + string.count(data, '\n')
            self.rawdata = rawdata[i+1:]
            return self.goahead(end)
        self.rawdata = rawdata[i:]
        if end:
            if not self.__seen_starttag:
                self.syntax_error('no elements in file')
            if self.stack:
                self.syntax_error('missing end tags')
                while self.stack:
                    self.finish_endtag(self.stack[-1][0])
    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i):
        """Parse '<!--...-->' at index *i*; return end index or -1."""
        rawdata = self.rawdata
        if rawdata[i:i+4] <> '<!--':
            raise RuntimeError, 'unexpected call to handle_comment'
        res = commentclose.search(rawdata, i+4)
        if res is None:
            # Closing '-->' not buffered yet.
            return -1
        if doubledash.search(rawdata, i+4, res.start(0)):
            self.syntax_error("`--' inside comment")
        if rawdata[res.start(0)-1] == '-':
            self.syntax_error('comment cannot end in three dashes')
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+4, res.start(0)):
            self.syntax_error('illegal character in comment')
        self.handle_comment(rawdata[i+4: res.start(0)])
        return res.end(0)
    # Internal -- handle DOCTYPE tag, return length or -1 if not terminated
    def parse_doctype(self, res):
        """Parse a <!DOCTYPE ...> declaration matched by *res* (the
        module-level ``doctype`` regex match); return end index or -1."""
        rawdata = self.rawdata
        n = len(rawdata)
        name = res.group('name')
        if self.__map_case:
            name = string.lower(name)
        pubid, syslit = res.group('pubid', 'syslit')
        if pubid is not None:
            pubid = pubid[1:-1] # remove quotes
            pubid = string.join(string.split(pubid)) # normalize
        if syslit is not None: syslit = syslit[1:-1] # remove quotes
        j = k = res.end(0)
        if k >= n:
            return -1
        if rawdata[k] == '[':
            # Internal DTD subset: scan for the matching ']', honoring
            # quoting state and '<...>' nesting.
            level = 0
            k = k+1
            dq = sq = 0
            while k < n:
                c = rawdata[k]
                if not sq and c == '"':
                    dq = not dq
                elif not dq and c == "'":
                    sq = not sq
                elif sq or dq:
                    pass
                elif level <= 0 and c == ']':
                    res = endbracket.match(rawdata, k+1)
                    if res is None:
                        return -1
                    self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
                    return res.end(0)
                elif c == '<':
                    level = level + 1
                elif c == '>':
                    level = level - 1
                    if level < 0:
                        self.syntax_error("bogus `>' in DOCTYPE")
                k = k+1
        res = endbracketfind.match(rawdata, k)
        if res is None:
            return -1
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in DOCTYPE')
        self.handle_doctype(name, pubid, syslit, None)
        return res.end(0)
    # Internal -- handle CDATA tag, return length or -1 if not terminated
    def parse_cdata(self, i):
        """Parse '<![CDATA[...]]>' at index *i*; return end index or -1."""
        rawdata = self.rawdata
        if rawdata[i:i+9] <> '<![CDATA[':
            raise RuntimeError, 'unexpected call to parse_cdata'
        res = cdataclose.search(rawdata, i+9)
        if res is None:
            # ']]>' not buffered yet.
            return -1
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+9, res.start(0)):
            self.syntax_error('illegal character in CDATA')
        if not self.stack:
            self.syntax_error('CDATA not in content')
        self.handle_cdata(rawdata[i+9:res.start(0)])
        return res.end(0)
    # Attributes permitted on an old-style <?xml:namespace ...?> declaration.
    __xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
    # Internal -- handle a processing instruction tag
    def parse_proc(self, i):
        """Parse '<?target ...?>' at index *i*; also handles old-style
        <?xml:namespace ...?> declarations.  Return end index or -1."""
        rawdata = self.rawdata
        end = procclose.search(rawdata, i)
        if end is None:
            return -1
        j = end.start(0)
        if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
            self.syntax_error('illegal character in processing instruction')
        res = tagfind.match(rawdata, i+2)
        if res is None:
            raise RuntimeError, 'unexpected call to parse_proc'
        k = res.end(0)
        name = res.group(0)
        if self.__map_case:
            name = string.lower(name)
        if name == 'xml:namespace':
            self.syntax_error('old-fashioned namespace declaration')
            self.__use_namespaces = -1
            # namespace declaration
            # this must come after the <?xml?> declaration (if any)
            # and before the <!DOCTYPE> (if any).
            if self.__seen_doctype or self.__seen_starttag:
                self.syntax_error('xml:namespace declaration too late in document')
            attrdict, namespace, k = self.parse_attributes(name, k, j)
            if namespace:
                self.syntax_error('namespace declaration inside namespace declaration')
            for attrname in attrdict.keys():
                if not self.__xml_namespace_attributes.has_key(attrname):
                    self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
            if not attrdict.has_key('ns') or not attrdict.has_key('prefix'):
                self.syntax_error('xml:namespace without required attributes')
            prefix = attrdict.get('prefix')
            if ncname.match(prefix) is None:
                self.syntax_error('xml:namespace illegal prefix value')
                return end.end(0)
            if self.__namespaces.has_key(prefix):
                self.syntax_error('xml:namespace prefix not unique')
            self.__namespaces[prefix] = attrdict['ns']
        else:
            if string.lower(name) == 'xml':
                self.syntax_error('illegal processing instruction target name')
            self.handle_proc(name, rawdata[k:j])
        return end.end(0)
    # Internal -- parse attributes between i and j
    def parse_attributes(self, tag, i, j):
        """Parse attributes of *tag* in rawdata[i:j]; return
        (attrdict, namespace, stop_index)."""
        rawdata = self.rawdata
        attrdict = {}
        namespace = {}
        while i < j:
            res = attrfind.match(rawdata, i)
            if res is None:
                break
            attrname, attrvalue = res.group('name', 'value')
            if self.__map_case:
                attrname = string.lower(attrname)
            i = res.end(0)
            if attrvalue is None:
                # Minimized attribute: fall back to the name as its value.
                self.syntax_error("no value specified for attribute `%s'" % attrname)
                attrvalue = attrname
            elif attrvalue[:1] == "'" == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            elif not self.__accept_unquoted_attributes:
                self.syntax_error("attribute `%s' value not quoted" % attrname)
            res = xmlns.match(attrname)
            if res is not None:
                # namespace declaration
                ncname = res.group('ncname')
                namespace[ncname or ''] = attrvalue or None
                if not self.__use_namespaces:
                    self.__use_namespaces = len(self.stack)+1
                continue
            if '<' in attrvalue:
                self.syntax_error("`<' illegal in attribute value")
            if attrdict.has_key(attrname):
                self.syntax_error("attribute `%s' specified twice" % attrname)
            attrvalue = string.translate(attrvalue, attrtrans)
            attrdict[attrname] = self.translate_references(attrvalue)
        return attrdict, namespace, i
    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at index *i*; return end index or -1."""
        rawdata = self.rawdata
        # i points to start of tag
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        tag = starttagmatch.match(rawdata, i)
        if tag is None or tag.end(0) != end.end(0):
            self.syntax_error('garbage in starttag')
            return end.end(0)
        nstag = tagname = tag.group('tagname')
        if self.__map_case:
            nstag = tagname = string.lower(nstag)
        if not self.__seen_starttag and self.__seen_doctype and \
           tagname != self.__seen_doctype:
            self.syntax_error('starttag does not match DOCTYPE')
        if self.__seen_starttag and not self.stack:
            self.syntax_error('multiple elements on top level')
        k, j = tag.span('attrs')
        attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
        self.stack.append((tagname, nsdict, nstag))
        if self.__use_namespaces:
            res = qname.match(tagname)
        else:
            res = None
        if res is not None:
            # Resolve the tag's prefix against declarations on the open
            # element stack (innermost wins), then the global table.
            prefix, nstag = res.group('prefix', 'local')
            if prefix is None:
                prefix = ''
            ns = None
            for t, d, nst in self.stack:
                if d.has_key(prefix):
                    ns = d[prefix]
            if ns is None and prefix != '':
                ns = self.__namespaces.get(prefix)
            if ns is not None:
                nstag = ns + ' ' + nstag
            elif prefix != '':
                nstag = prefix + ':' + nstag # undo split
            self.stack[-1] = tagname, nsdict, nstag
        # translate namespace of attributes
        if self.__use_namespaces:
            nattrdict = {}
            for key, val in attrdict.items():
                res = qname.match(key)
                if res is not None:
                    aprefix, key = res.group('prefix', 'local')
                    if self.__map_case:
                        key = string.lower(key)
                    if aprefix is None:
                        aprefix = ''
                    ans = None
                    for t, d, nst in self.stack:
                        if d.has_key(aprefix):
                            ans = d[aprefix]
                    if ans is None and aprefix != '':
                        ans = self.__namespaces.get(aprefix)
                    if ans is not None:
                        key = ans + ' ' + key
                    elif aprefix != '':
                        key = aprefix + ':' + key
                    elif ns is not None:
                        key = ns + ' ' + key
                nattrdict[key] = val
            attrdict = nattrdict
        attributes = self.attributes.get(nstag)
        if attributes is not None:
            # Validate against declared attributes and apply defaults.
            for key in attrdict.keys():
                if not attributes.has_key(key):
                    self.syntax_error("unknown attribute `%s' in tag `%s'" % (key, tagname))
            for key, val in attributes.items():
                if val is not None and not attrdict.has_key(key):
                    attrdict[key] = val
        method = self.elements.get(nstag, (None, None))[0]
        self.finish_starttag(nstag, attrdict, method)
        if tag.group('slash') == '/':
            # Empty-element form: <tag/>.
            self.finish_endtag(tagname)
        return tag.end(0)
# Internal -- parse endtag
def parse_endtag(self, i):
"""Parse '</tag>' starting at rawdata[i]; dispatch via finish_endtag().

Returns the new scan position, i+1 when the '<' must be treated as
plain data, or -1 when more input is needed.
"""
rawdata = self.rawdata
# Find the closing '>' (quote-aware); without it the tag is incomplete.
end = endbracketfind.match(rawdata, i+1)
if end is None:
return -1
res = tagfind.match(rawdata, i+2)
if res is None:
# '</' with no tag name following it.
if self.literal:
# In literal (CDATA-like) mode a non-matching tag is data.
self.handle_data(rawdata[i])
return i+1
if not self.__accept_missing_endtag_name:
self.syntax_error('no name specified in end tag')
# SGML-style </>: close the innermost open element.
tag = self.stack[-1][0]
k = i+2
else:
tag = res.group(0)
if self.__map_case:
tag = string.lower(tag)
if self.literal:
# Literal mode ends only at the end tag of the element that
# started it; anything else is passed through as data.
if not self.stack or tag != self.stack[-1][0]:
self.handle_data(rawdata[i])
return i+1
self.literal = 0
k = res.end(0)
# Only optional whitespace may sit between the name and the '>'.
if endbracket.match(rawdata, k) is None:
self.syntax_error('garbage in end tag')
self.finish_endtag(tag)
return end.end(0)
# Internal -- finish processing of start tag
def finish_starttag(self, tagname, attrdict, method):
if method is not None:
self.handle_starttag(tagname, method, attrdict)
else:
self.unknown_starttag(tagname, attrdict)
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
self.syntax_error('name-less end tag')
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
found = -1
for i in range(len(self.stack)):
if tag == self.stack[i][0]:
found = i
if found == -1:
self.syntax_error('unopened end tag')
return
while len(self.stack) > found:
if found < len(self.stack) - 1:
self.syntax_error('missing close tag for %s' % self.stack[-1][2])
nstag = self.stack[-1][2]
method = self.elements.get(nstag, (None, None))[1]
if method is not None:
self.handle_endtag(nstag, method)
else:
self.unknown_endtag(nstag)
if self.__use_namespaces == len(self.stack):
self.__use_namespaces = 0
del self.stack[-1]
# Overridable -- handle xml processing instruction
def handle_xml(self, encoding, standalone):
pass
# Overridable -- handle DOCTYPE
def handle_doctype(self, tag, pubid, syslit, data):
pass
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- handle character reference, no need to override
def handle_charref(self, name):
"""Translate a character reference to data (leading '#' already stripped).

*name* is decimal digits, or 'x' followed by hex digits.  Malformed
numbers and code points outside 0..255 go to unknown_charref().
"""
try:
if name[0] == 'x':
# Hexadecimal form: &#xHH;
n = string.atoi(name[1:], 16)
else:
n = string.atoi(name)
except string.atoi_error:
# string.atoi is the Python 1.x/2.0 idiom; its error means a
# malformed number.
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
# Definition of entities -- derived classes may override
# NOTE(review): the previous values ('<', '>', '&', '"', and a bare ''')
# look like HTML-unescaped remnants of the character references this table
# originally held; the bare ''' is not even valid Python (it opens a
# triple-quoted string).  Restored to character-reference form, which the
# reference-translation machinery presumably rescans safely (as in stock
# xmllib) -- literal '<'/'&' replacement text would be re-parsed as markup.
entitydefs = {'lt': '&#60;',      # must use charref
              'gt': '&#62;',
              'amp': '&#38;',     # must use charref
              'quot': '&#34;',
              'apos': '&#39;',
              }
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle cdata, could be overridden
def handle_cdata(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle processing instructions, could be overridden
def handle_proc(self, name, data):
pass
# Example -- handle relatively harmless syntax errors, could be overridden
def syntax_error(self, message):
"""Report a recoverable syntax error; the default is to raise (Python 2 raise syntax)."""
raise RuntimeError, 'Syntax error at line %d: %s' % (self.lineno, message)
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, name):
self.syntax_error("reference to unknown entity `&%s;'" % name)
class TestXMLParser(XMLParser):
def __init__(self, **kw):
self.testdata = ""
apply(XMLParser.__init__, (self,), kw)
def handle_xml(self, encoding, standalone):
self.flush()
print 'xml: encoding =',encoding,'standalone =',standalone
def handle_doctype(self, tag, pubid, syslit, data):
self.flush()
print 'DOCTYPE:',tag, `data`
def handle_data(self, data):
self.testdata = self.testdata + data
if len(`self.testdata`) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', `data`
def handle_cdata(self, data):
self.flush()
print 'cdata:', `data`
def handle_proc(self, name, data):
self.flush()
print 'processing:',name,`data`
def handle_comment(self, data):
self.flush()
r = `data`
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def syntax_error(self, message):
print 'error at line %d:' % self.lineno, message
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs.items():
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def close(self):
XMLParser.close(self)
self.flush()
def test(args = None):
import sys, getopt
from time import time
if not args:
args = sys.argv[1:]
opts, args = getopt.getopt(args, 'st')
klass = TestXMLParser
do_time = 0
for o, a in opts:
if o == '-s':
klass = XMLParser
elif o == '-t':
do_time = 1
if args:
file = args[0]
else:
file = 'test.xml'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
t0 = time()
try:
if do_time:
x.feed(data)
x.close()
else:
for c in data:
x.feed(c)
x.close()
except RuntimeError, msg:
t1 = time()
print msg
if do_time:
print 'total time: %g' % (t1-t0)
sys.exit(1)
t1 = time()
if do_time:
print 'total time: %g' % (t1-t0)
if __name__ == '__main__':
test()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/xmllib.py",
"copies": "4",
"size": "34935",
"license": "mit",
"hash": 2498084487694161400,
"line_mean": 36.9315960912,
"line_max": 109,
"alpha_frac": 0.4851009017,
"autogenerated": false,
"ratio": 4.076429404900817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6561530306600817,
"avg_score": null,
"num_lines": null
} |
"""A parser for XML, using the derived class as static DTD."""
# Author: Sjoerd Mullender.
import re
import string
version = '0.3'
class Error(RuntimeError):
"""Exception raised for XML syntax errors (see XMLParser.syntax_error)."""
pass
# Regular expressions used for parsing
_S = '[ \t\r\n]+' # white space
_opS = '[ \t\r\n]*' # optional white space
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*' # valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")" # quoted XML string
illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
interesting = re.compile('[]&<]')
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')
attrfind = re.compile(
_S + '(?P<name>' + _Name + ')'
'(' + _opS + '=' + _opS +
'(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
'(?P<attrs>(?:'+attrfind.pattern+')*)'+
starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# this matches one of the following:
# SYSTEM SystemLiteral
# PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>'+_QStr+')'
_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
"'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
_ExternalId = '(?:SYSTEM|' \
'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
')'+_S+_SystemLiteral%'syslit'
doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
'(?:'+_S+_ExternalId+')?'+_opS)
xmldecl = re.compile('<\?xml'+_S+
'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
'(?:'+_S+'encoding'+_opS+'='+_opS+
"(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
'"[A-Za-z][-A-Za-z0-9._]*"))?'
'(?:'+_S+'standalone'+_opS+'='+_opS+
'(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
_opS+'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrtrans = string.maketrans(' \r\n\t', ' ')
# definitions for XML namespaces
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*' # XML Name, minus the ":"
ncname = re.compile(_NCName + '$')
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
'(?P<local>' + _NCName + ')$')
xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks).
class XMLParser:
attributes = {} # default, to be overridden
elements = {} # default, to be overridden
# parsing options, settable using keyword args in __init__
__accept_unquoted_attributes = 0
__accept_missing_endtag_name = 0
__map_case = 0
__accept_utf8 = 0
__translate_attribute_references = 1
# Interface -- initialize and reset this instance
def __init__(self, **kw):
"""Initialize the parser.

Recognized keyword options, each overriding the class-level default
with an instance attribute: accept_unquoted_attributes,
accept_missing_endtag_name, map_case, accept_utf8,
translate_attribute_references.
"""
# Set by __fixelements() once start_/end_ handler methods have been
# collected into self.elements (see reset()).
self.__fixed = 0
if 'accept_unquoted_attributes' in kw:
self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
if 'accept_missing_endtag_name' in kw:
self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
if 'map_case' in kw:
self.__map_case = kw['map_case']
if 'accept_utf8' in kw:
self.__accept_utf8 = kw['accept_utf8']
if 'translate_attribute_references' in kw:
self.__translate_attribute_references = kw['translate_attribute_references']
self.reset()
def __fixelements(self):
self.__fixed = 1
self.elements = {}
self.__fixdict(self.__dict__)
self.__fixclass(self.__class__)
def __fixclass(self, kl):
self.__fixdict(kl.__dict__)
for k in kl.__bases__:
self.__fixclass(k)
def __fixdict(self, dict):
for key in dict.keys():
if key[:6] == 'start_':
tag = key[6:]
start, end = self.elements.get(tag, (None, None))
if start is None:
self.elements[tag] = getattr(self, key), end
elif key[:4] == 'end_':
tag = key[4:]
start, end = self.elements.get(tag, (None, None))
if end is None:
self.elements[tag] = start, getattr(self, key)
# Interface -- reset this instance. Loses all unprocessed data
def reset(self):
"""Reset all parser state; any buffered, unprocessed data is lost."""
self.rawdata = ''  # unprocessed input buffer
self.stack = []  # open elements: (tagname, nsdict, nstag) triples
self.nomoretags = 0  # 1 => treat the rest of the input as plain data
self.literal = 0  # 1 => CDATA-like literal mode
self.lineno = 1
self.__at_start = 1  # still before any non-whitespace content
self.__seen_doctype = None  # DOCTYPE name once one has been parsed
self.__seen_starttag = 0
self.__use_namespaces = 0  # 0 = off; otherwise stack depth (+1) where enabled
self.__namespaces = {'xml':None} # xml is implicitly declared
# backward compatibility hack: if elements not overridden,
# fill it in ourselves
if self.elements is XMLParser.elements:
self.__fixelements()
# For derived classes only -- enter literal mode (CDATA) till EOF
def setnomoretags(self):
self.nomoretags = self.literal = 1
# For derived classes only -- enter literal mode (CDATA)
def setliteral(self, *args):
"""Enter literal (CDATA-like) mode; any positional arguments are ignored."""
self.literal = 1
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
def feed(self, data):
self.rawdata = self.rawdata + data
self.goahead(0)
# Interface -- handle the remaining data
def close(self):
self.goahead(1)
if self.__fixed:
self.__fixed = 0
# remove self.elements so that we don't leak
del self.elements
# Interface -- translate references
def translate_references(self, data, all = 1):
"""Expand character and (if *all* is true) general entity references.

Replacement text coming from entitydefs is rescanned, so an entity may
expand to further references.  Returns the translated string.
"""
if not self.__translate_attribute_references:
return data
i = 0
while 1:
res = amp.search(data, i)
if res is None:
return data
s = res.start(0)
res = ref.match(data, s)
if res is None:
self.syntax_error("bogus `&'")
i = s+1
continue
i = res.end(0)
str = res.group(1)
rescan = 0
if str[0] == '#':
# Character reference: decimal (&#NN;) or hexadecimal (&#xNN;).
if str[1] == 'x':
str = chr(int(str[2:], 16))
else:
str = chr(int(str[1:]))
if data[i - 1] != ';':
self.syntax_error("`;' missing after char reference")
i = i-1
elif all:
# General entity reference; known entities get rescanned.
if str in self.entitydefs:
str = self.entitydefs[str]
rescan = 1
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
else:
# Unknown entity: report it and leave the reference in place.
self.syntax_error("reference to unknown entity `&%s;'" % str)
str = '&' + str + ';'
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
# when we get here, str contains the translated text and i points
# to the end of the string that is to be replaced
data = data[:s] + str + data[i:]
if rescan:
i = s
else:
i = s + len(str)
# Interface - return a dictionary of all namespaces currently valid
def getnamespace(self):
nsdict = {}
for t, d, nst in self.stack:
nsdict.update(d)
return nsdict
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if i > 0:
self.__at_start = 0
if self.nomoretags:
data = rawdata[i:n]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = n
break
res = interesting.search(rawdata, i)
if res:
j = res.start(0)
else:
j = n
if i < j:
data = rawdata[i:j]
if self.__at_start and space.match(data) is None:
self.syntax_error('illegal data at start of file')
self.__at_start = 0
if not self.stack and space.match(data) is None:
self.syntax_error('data not in content')
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
self.__seen_starttag = 1
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if commentopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_comment(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if cdataopen.match(rawdata, i):
k = self.parse_cdata(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = xmldecl.match(rawdata, i)
if res:
if not self.__at_start:
self.syntax_error("<?xml?> declaration not at start of document")
version, encoding, standalone = res.group('version',
'encoding',
'standalone')
if version[1:-1] != '1.0':
raise Error('only XML version 1.0 supported')
if encoding: encoding = encoding[1:-1]
if standalone: standalone = standalone[1:-1]
self.handle_xml(encoding, standalone)
i = res.end(0)
continue
res = procopen.match(rawdata, i)
if res:
k = self.parse_proc(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = doctype.match(rawdata, i)
if res:
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
if self.__seen_doctype:
self.syntax_error('multiple DOCTYPE elements')
if self.__seen_starttag:
self.syntax_error('DOCTYPE not at beginning of document')
k = self.parse_doctype(res)
if k < 0: break
self.__seen_doctype = res.group('name')
if self.__map_case:
self.__seen_doctype = self.__seen_doctype.lower()
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
elif rawdata[i] == '&':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
res = charref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in charref")
i = i-1
if not self.stack:
self.syntax_error('data not in content')
self.handle_charref(res.group('char')[:-1])
self.lineno = self.lineno + res.group(0).count('\n')
continue
res = entityref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in entityref")
i = i-1
name = res.group('name')
if self.__map_case:
name = name.lower()
if name in self.entitydefs:
self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
n = len(rawdata)
i = res.start(0)
else:
self.unknown_entityref(name)
self.lineno = self.lineno + res.group(0).count('\n')
continue
elif rawdata[i] == ']':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
if n-i < 3:
break
if cdataclose.match(rawdata, i):
self.syntax_error("bogus `]]>'")
self.handle_data(rawdata[i])
i = i+1
continue
else:
raise Error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
break
# end while
if i > 0:
self.__at_start = 0
if end and i < n:
data = rawdata[i]
self.syntax_error("bogus `%s'" % data)
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
self.rawdata = rawdata[i+1:]
return self.goahead(end)
self.rawdata = rawdata[i:]
if end:
if not self.__seen_starttag:
self.syntax_error('no elements in file')
if self.stack:
self.syntax_error('missing end tags')
while self.stack:
self.finish_endtag(self.stack[-1][0])
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i):
"""Parse '<!--...-->' starting at i; return end position, or -1 if unterminated."""
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
raise Error('unexpected call to handle_comment')
res = commentclose.search(rawdata, i+4)
if res is None:
# Closing '-->' not yet in the buffer.
return -1
# '--' is forbidden anywhere inside a comment.
if doubledash.search(rawdata, i+4, res.start(0)):
self.syntax_error("`--' inside comment")
if rawdata[res.start(0)-1] == '-':
self.syntax_error('comment cannot end in three dashes')
if not self.__accept_utf8 and \
illegal.search(rawdata, i+4, res.start(0)):
self.syntax_error('illegal character in comment')
self.handle_comment(rawdata[i+4: res.start(0)])
return res.end(0)
# Internal -- handle DOCTYPE tag, return length or -1 if not terminated
def parse_doctype(self, res):
rawdata = self.rawdata
n = len(rawdata)
name = res.group('name')
if self.__map_case:
name = name.lower()
pubid, syslit = res.group('pubid', 'syslit')
if pubid is not None:
pubid = pubid[1:-1] # remove quotes
pubid = ' '.join(pubid.split()) # normalize
if syslit is not None: syslit = syslit[1:-1] # remove quotes
j = k = res.end(0)
if k >= n:
return -1
if rawdata[k] == '[':
level = 0
k = k+1
dq = sq = 0
while k < n:
c = rawdata[k]
if not sq and c == '"':
dq = not dq
elif not dq and c == "'":
sq = not sq
elif sq or dq:
pass
elif level <= 0 and c == ']':
res = endbracket.match(rawdata, k+1)
if res is None:
return -1
self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
return res.end(0)
elif c == '<':
level = level + 1
elif c == '>':
level = level - 1
if level < 0:
self.syntax_error("bogus `>' in DOCTYPE")
k = k+1
res = endbracketfind.match(rawdata, k)
if res is None:
return -1
if endbracket.match(rawdata, k) is None:
self.syntax_error('garbage in DOCTYPE')
self.handle_doctype(name, pubid, syslit, None)
return res.end(0)
# Internal -- handle CDATA tag, return length or -1 if not terminated
def parse_cdata(self, i):
"""Parse '<![CDATA[...]]>' starting at i; return end position, or -1 if unterminated."""
rawdata = self.rawdata
if rawdata[i:i+9] != '<![CDATA[':
raise Error('unexpected call to parse_cdata')
res = cdataclose.search(rawdata, i+9)
if res is None:
# Closing ']]>' not yet in the buffer.
return -1
if not self.__accept_utf8 and \
illegal.search(rawdata, i+9, res.start(0)):
self.syntax_error('illegal character in CDATA')
if not self.stack:
self.syntax_error('CDATA not in content')
self.handle_cdata(rawdata[i+9:res.start(0)])
return res.end(0)
__xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
# Internal -- handle a processing instruction tag
def parse_proc(self, i):
rawdata = self.rawdata
end = procclose.search(rawdata, i)
if end is None:
return -1
j = end.start(0)
if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
self.syntax_error('illegal character in processing instruction')
res = tagfind.match(rawdata, i+2)
if res is None:
raise Error('unexpected call to parse_proc')
k = res.end(0)
name = res.group(0)
if self.__map_case:
name = name.lower()
if name == 'xml:namespace':
self.syntax_error('old-fashioned namespace declaration')
self.__use_namespaces = -1
# namespace declaration
# this must come after the <?xml?> declaration (if any)
# and before the <!DOCTYPE> (if any).
if self.__seen_doctype or self.__seen_starttag:
self.syntax_error('xml:namespace declaration too late in document')
attrdict, namespace, k = self.parse_attributes(name, k, j)
if namespace:
self.syntax_error('namespace declaration inside namespace declaration')
for attrname in attrdict.keys():
if not attrname in self.__xml_namespace_attributes:
self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
if not 'ns' in attrdict or not 'prefix' in attrdict:
self.syntax_error('xml:namespace without required attributes')
prefix = attrdict.get('prefix')
if ncname.match(prefix) is None:
self.syntax_error('xml:namespace illegal prefix value')
return end.end(0)
if prefix in self.__namespaces:
self.syntax_error('xml:namespace prefix not unique')
self.__namespaces[prefix] = attrdict['ns']
else:
if name.lower() == 'xml':
self.syntax_error('illegal processing instruction target name')
self.handle_proc(name, rawdata[k:j])
return end.end(0)
# Internal -- parse attributes between i and j
def parse_attributes(self, tag, i, j):
"""Parse attribute specifications in rawdata[i:j] for element *tag*.

Returns (attrdict, namespace, i): attrdict maps attribute name to its
reference-translated value, namespace maps declared prefixes ('' for
the default namespace) to namespace names, and i is the position just
past the last attribute parsed.
"""
rawdata = self.rawdata
attrdict = {}
namespace = {}
while i < j:
res = attrfind.match(rawdata, i)
if res is None:
break
attrname, attrvalue = res.group('name', 'value')
if self.__map_case:
attrname = attrname.lower()
i = res.end(0)
if attrvalue is None:
# Minimized (value-less) attribute: report it, then fall back
# to using the attribute name as its own value.
self.syntax_error("no value specified for attribute `%s'" % attrname)
attrvalue = attrname
elif attrvalue[:1] == "'" == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
elif not self.__accept_unquoted_attributes:
self.syntax_error("attribute `%s' value not quoted" % attrname)
res = xmlns.match(attrname)
if res is not None:
# namespace declaration
ncname = res.group('ncname')
namespace[ncname or ''] = attrvalue or None
if not self.__use_namespaces:
# Remember the stack depth at which namespaces were enabled.
self.__use_namespaces = len(self.stack)+1
continue
if '<' in attrvalue:
self.syntax_error("`<' illegal in attribute value")
if attrname in attrdict:
self.syntax_error("attribute `%s' specified twice" % attrname)
# Normalize whitespace to spaces, then expand references.
attrvalue = attrvalue.translate(attrtrans)
attrdict[attrname] = self.translate_references(attrvalue)
return attrdict, namespace, i
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
rawdata = self.rawdata
# i points to start of tag
end = endbracketfind.match(rawdata, i+1)
if end is None:
return -1
tag = starttagmatch.match(rawdata, i)
if tag is None or tag.end(0) != end.end(0):
self.syntax_error('garbage in starttag')
return end.end(0)
nstag = tagname = tag.group('tagname')
if self.__map_case:
nstag = tagname = nstag.lower()
if not self.__seen_starttag and self.__seen_doctype and \
tagname != self.__seen_doctype:
self.syntax_error('starttag does not match DOCTYPE')
if self.__seen_starttag and not self.stack:
self.syntax_error('multiple elements on top level')
k, j = tag.span('attrs')
attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
self.stack.append((tagname, nsdict, nstag))
if self.__use_namespaces:
res = qname.match(tagname)
else:
res = None
if res is not None:
prefix, nstag = res.group('prefix', 'local')
if prefix is None:
prefix = ''
ns = None
for t, d, nst in self.stack:
if prefix in d:
ns = d[prefix]
if ns is None and prefix != '':
ns = self.__namespaces.get(prefix)
if ns is not None:
nstag = ns + ' ' + nstag
elif prefix != '':
nstag = prefix + ':' + nstag # undo split
self.stack[-1] = tagname, nsdict, nstag
# translate namespace of attributes
attrnamemap = {} # map from new name to old name (used for error reporting)
for key in attrdict.keys():
attrnamemap[key] = key
if self.__use_namespaces:
nattrdict = {}
for key, val in attrdict.items():
okey = key
res = qname.match(key)
if res is not None:
aprefix, key = res.group('prefix', 'local')
if self.__map_case:
key = key.lower()
if aprefix is not None:
ans = None
for t, d, nst in self.stack:
if aprefix in d:
ans = d[aprefix]
if ans is None:
ans = self.__namespaces.get(aprefix)
if ans is not None:
key = ans + ' ' + key
else:
key = aprefix + ':' + key
nattrdict[key] = val
attrnamemap[key] = okey
attrdict = nattrdict
attributes = self.attributes.get(nstag)
if attributes is not None:
for key in attrdict.keys():
if not key in attributes:
self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
for key, val in attributes.items():
if val is not None and not key in attrdict:
attrdict[key] = val
method = self.elements.get(nstag, (None, None))[0]
self.finish_starttag(nstag, attrdict, method)
if tag.group('slash') == '/':
self.finish_endtag(tagname)
return tag.end(0)
# Internal -- parse endtag
def parse_endtag(self, i):
"""Parse '</tag>' starting at rawdata[i]; dispatch via finish_endtag().

Returns the new scan position, i+1 when the '<' must be treated as
plain data, or -1 when more input is needed.
"""
rawdata = self.rawdata
# Find the closing '>' (quote-aware); without it the tag is incomplete.
end = endbracketfind.match(rawdata, i+1)
if end is None:
return -1
res = tagfind.match(rawdata, i+2)
if res is None:
# '</' with no tag name following it.
if self.literal:
self.handle_data(rawdata[i])
return i+1
if not self.__accept_missing_endtag_name:
self.syntax_error('no name specified in end tag')
# SGML-style </>: close the innermost open element.
tag = self.stack[-1][0]
k = i+2
else:
tag = res.group(0)
if self.__map_case:
tag = tag.lower()
if self.literal:
# Literal mode ends only at the end tag of the element that
# started it; anything else is passed through as data.
if not self.stack or tag != self.stack[-1][0]:
self.handle_data(rawdata[i])
return i+1
k = res.end(0)
# Only optional whitespace may sit between the name and the '>'.
if endbracket.match(rawdata, k) is None:
self.syntax_error('garbage in end tag')
self.finish_endtag(tag)
return end.end(0)
# Internal -- finish processing of start tag
def finish_starttag(self, tagname, attrdict, method):
if method is not None:
self.handle_starttag(tagname, method, attrdict)
else:
self.unknown_starttag(tagname, attrdict)
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
"""Close open elements down to *tag*, invoking end handlers on the way."""
# An end tag always terminates literal (CDATA-like) mode.
self.literal = 0
if not tag:
self.syntax_error('name-less end tag')
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
# Locate the innermost open element with this name (last match wins).
found = -1
for i in range(len(self.stack)):
if tag == self.stack[i][0]:
found = i
if found == -1:
self.syntax_error('unopened end tag')
return
# Pop every element above (and including) the match; each one popped
# above the match is missing its own end tag.
while len(self.stack) > found:
if found < len(self.stack) - 1:
self.syntax_error('missing close tag for %s' % self.stack[-1][2])
nstag = self.stack[-1][2]
method = self.elements.get(nstag, (None, None))[1]
if method is not None:
self.handle_endtag(nstag, method)
else:
self.unknown_endtag(nstag)
# Leaving the element that enabled namespace processing disables it.
if self.__use_namespaces == len(self.stack):
self.__use_namespaces = 0
del self.stack[-1]
# Overridable -- handle xml processing instruction
def handle_xml(self, encoding, standalone):
pass
# Overridable -- handle DOCTYPE
def handle_doctype(self, tag, pubid, syslit, data):
pass
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- handle character reference, no need to override
def handle_charref(self, name):
try:
if name[0] == 'x':
n = int(name[1:], 16)
else:
n = int(name)
except ValueError:
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
# Definition of entities -- derived classes may override
# NOTE(review): the previous values ('<', '>', '&', '"', and a bare ''')
# look like HTML-unescaped remnants of the character references this table
# originally held; the bare ''' is not even valid Python (it opens a
# triple-quoted string).  Restored to character-reference form, which
# translate_references() rescans (rescan=1), so literal '<'/'&' replacement
# text would otherwise be re-parsed as markup.
entitydefs = {'lt': '&#60;',      # must use charref
              'gt': '&#62;',
              'amp': '&#38;',     # must use charref
              'quot': '&#34;',
              'apos': '&#39;',
              }
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle cdata, could be overridden
def handle_cdata(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle processing instructions, could be overridden
def handle_proc(self, name, data):
pass
# Example -- handle relatively harmless syntax errors, could be overridden
def syntax_error(self, message):
"""Report a recoverable syntax error; the default behavior is to raise Error."""
raise Error('Syntax error at line %d: %s' % (self.lineno, message))
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, name):
self.syntax_error("reference to unknown entity `&%s;'" % name)
class TestXMLParser(XMLParser):
def __init__(self, **kw):
self.testdata = ""
XMLParser.__init__(self, **kw)
def handle_xml(self, encoding, standalone):
self.flush()
print 'xml: encoding =',encoding,'standalone =',standalone
def handle_doctype(self, tag, pubid, syslit, data):
self.flush()
print 'DOCTYPE:',tag, repr(data)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_cdata(self, data):
self.flush()
print 'cdata:', repr(data)
def handle_proc(self, name, data):
self.flush()
print 'processing:',name,repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def syntax_error(self, message):
print 'error at line %d:' % self.lineno, message
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs.items():
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def close(self):
XMLParser.close(self)
self.flush()
def test(args = None):
import sys, getopt
from time import time
if not args:
args = sys.argv[1:]
opts, args = getopt.getopt(args, 'st')
klass = TestXMLParser
do_time = 0
for o, a in opts:
if o == '-s':
klass = XMLParser
elif o == '-t':
do_time = 1
if args:
file = args[0]
else:
file = 'test.xml'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
t0 = time()
try:
if do_time:
x.feed(data)
x.close()
else:
for c in data:
x.feed(c)
x.close()
except Error, msg:
t1 = time()
print msg
if do_time:
print 'total time: %g' % (t1-t0)
sys.exit(1)
t1 = time()
if do_time:
print 'total time: %g' % (t1-t0)
if __name__ == '__main__':
test()
| {
"repo_name": "inessadl/kinect-2-libras",
"path": "Kinect2Libras/KinectFingerTracking/Lib/xmllib.py",
"copies": "41",
"size": "34728",
"license": "apache-2.0",
"hash": -977224960648606500,
"line_mean": 36.5437837838,
"line_max": 109,
"alpha_frac": 0.483010827,
"autogenerated": false,
"ratio": 4.101570804299043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# sgmlop support added by fredrik@pythonware.com (May 19, 1998)
__version__=''' $Id: xmllib.py 3342 2008-12-12 15:55:34Z andy $ '''
__doc__='''From before xmllib was in the Python standard library.
Probably ought to be removed'''
import re
import string
try:
import sgmlop # this works for both builtin on the path or relative
except ImportError:
sgmlop = None
# standard entity defs
# The five character entities predefined by the XML 1.0 specification.
ENTITYDEFS = {
    'lt': '<',
    'gt': '>',
    'amp': '&',
    'quot': '"',
    'apos': "'",
    }
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks). Entity references are
# passed by calling self.handle_entityref() with the entity reference
# as argument.
# --------------------------------------------------------------------
# original re-based XML parser
# Lexical patterns for the pure-Python scanner.  Raw strings are used
# where the pattern contains regex backslash escapes; the values are
# unchanged from the original definitions.
_S = '[ \t\r\n]+'                       # one or more whitespace chars
_opS = '[ \t\r\n]*'                     # zero or more whitespace chars
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*'    # a valid XML name
interesting = re.compile('[&<]')
incomplete = re.compile(
    '&(' + _Name + '|#[0-9]*|#x[0-9a-fA-F]*)?|'
    '<([a-zA-Z_:][^<>]*|'
    '/([a-zA-Z_:][^<>]*)?|'
    '![^<>]*|'
    r'\?[^<>]*)?')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+);?')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S)
newline = re.compile('\n')
starttagopen = re.compile('<' + _Name)
endtagopen = re.compile('</')
starttagend = re.compile(_opS + '(?P<slash>/?)>')
endbracket = re.compile('>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
special = re.compile('<!(?P<special>[^<>]*)>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _S)
procclose = re.compile(r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrfind = re.compile(
    _opS + '(?P<name>' + _Name + ')'
    '(' + _opS + '=' + _opS +
    r'(?P<value>\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9.:+*%?!()_#=~]+))')
class SlowXMLParser:
# Interface -- initialize and reset this instance
def __init__(self, verbose=0):
self.verbose = verbose
self.reset()
# Interface -- reset this instance. Loses all unprocessed data
def reset(self):
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
self.lineno = 1
# For derived classes only -- enter literal mode (CDATA) till EOF
def setnomoretags(self):
self.nomoretags = self.literal = 1
# For derived classes only -- enter literal mode (CDATA)
def setliteral(self, *args):
self.literal = 1
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
def feed(self, data):
self.rawdata = self.rawdata + data
self.goahead(0)
# Interface -- handle the remaining data
def close(self):
self.goahead(1)
# Interface -- translate references
def translate_references(self, data):
newdata = []
i = 0
while 1:
res = ref.search(data, i)
if res is None:
newdata.append(data[i:])
return string.join(newdata, '')
if data[res.end(0) - 1] != ';':
self.syntax_error(self.lineno,
'; missing after entity/char reference')
newdata.append(data[i:res.start(0)])
str = res.group(1)
if str[0] == '#':
if str[1] == 'x':
newdata.append(chr(string.atoi(str[2:], 16)))
else:
newdata.append(chr(string.atoi(str[1:])))
else:
try:
newdata.append(self.entitydefs[str])
except KeyError:
# can't do it, so keep the entity ref in
newdata.append('&' + str + ';')
i = res.end(0)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
data = rawdata[i:n]
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = n
break
res = interesting.search(rawdata, i)
if res:
j = res.start(0)
else:
j = n
if i < j:
data = rawdata[i:j]
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
i = k
self.literal = 0
continue
if commentopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = i+1
continue
k = self.parse_comment(i)
if k < 0: break
self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
i = k
continue
if cdataopen.match(rawdata, i):
k = self.parse_cdata(i)
if k < 0: break
self.lineno = self.lineno + string.count(rawdata[i:i], '\n')
i = k
continue
res = procopen.match(rawdata, i)
if res:
k = self.parse_proc(i, res)
if k < 0: break
self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
i = k
continue
res = special.match(rawdata, i)
if res:
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = i+1
continue
self.handle_special(res.group('special'))
self.lineno = self.lineno + string.count(res.group(0), '\n')
i = res.end(0)
continue
elif rawdata[i] == '&':
res = charref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error(self.lineno, '; missing in charref')
i = i-1
self.handle_charref(res.group('char')[:-1])
self.lineno = self.lineno + string.count(res.group(0), '\n')
continue
res = entityref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error(self.lineno, '; missing in entityref')
i = i-1
self.handle_entityref(res.group('name'))
self.lineno = self.lineno + string.count(res.group(0), '\n')
continue
else:
raise RuntimeError, 'neither < nor & ??'
# We get here only if incomplete matches but
# nothing else
res = incomplete.match(rawdata, i)
if not res:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = i+1
continue
j = res.end(0)
if j == n:
break # Really incomplete
self.syntax_error(self.lineno, 'bogus < or &')
data = res.group(0)
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = j
# end while
if end and i < n:
data = rawdata[i:n]
self.handle_data(data)
self.lineno = self.lineno + string.count(data, '\n')
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i):
rawdata = self.rawdata
if rawdata[i:i+4] <> '<!--':
raise RuntimeError, 'unexpected call to handle_comment'
res = commentclose.search(rawdata, i+4)
if not res:
return -1
# doubledash search will succeed because it's a subset of commentclose
if doubledash.search(rawdata, i+4).start(0) < res.start(0):
self.syntax_error(self.lineno, "`--' inside comment")
self.handle_comment(rawdata[i+4: res.start(0)])
return res.end(0)
# Internal -- handle CDATA tag, return lenth or -1 if not terminated
def parse_cdata(self, i):
rawdata = self.rawdata
if rawdata[i:i+9] <> '<![CDATA[':
raise RuntimeError, 'unexpected call to handle_cdata'
res = cdataclose.search(rawdata, i+9)
if not res:
return -1
self.handle_cdata(rawdata[i+9:res.start(0)])
return res.end(0)
def parse_proc(self, i, res):
rawdata = self.rawdata
if not res:
raise RuntimeError, 'unexpected call to parse_proc'
name = res.group('proc')
res = procclose.search(rawdata, res.end(0))
if not res:
return -1
self.handle_proc(name, rawdata[res.pos:res.start(0)])
return res.end(0)
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
rawdata = self.rawdata
# i points to start of tag
end = endbracket.search(rawdata, i+1)
if not end:
return -1
j = end.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrdict = {}
res = tagfind.match(rawdata, i+1)
if not res:
raise RuntimeError, 'unexpected call to parse_starttag'
k = res.end(0)
tag = res.group(0)
if hasattr(self, tag + '_attributes'):
attrlist = getattr(self, tag + '_attributes')
else:
attrlist = None
self.lasttag = tag
while k < j:
res = attrfind.match(rawdata, k)
if not res: break
attrname, attrvalue = res.group('name', 'value')
if attrvalue is None:
self.syntax_error(self.lineno, 'no attribute value specified')
attrvalue = attrname
elif attrvalue[:1] == "'" == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
else:
self.syntax_error(self.lineno, 'attribute value not quoted')
if attrlist is not None and attrname not in attrlist:
self.syntax_error(self.lineno,
'unknown attribute %s of element %s' %
(attrname, tag))
if attrdict.has_key(attrname):
self.syntax_error(self.lineno, 'attribute specified twice')
attrdict[attrname] = self.translate_references(attrvalue)
k = res.end(0)
res = starttagend.match(rawdata, k)
if not res:
self.syntax_error(self.lineno, 'garbage in start tag')
self.finish_starttag(tag, attrdict)
if res and res.group('slash') == '/':
self.finish_endtag(tag)
return end.end(0)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
end = endbracket.search(rawdata, i+1)
if not end:
return -1
res = tagfind.match(rawdata, i+2)
if not res:
self.syntax_error(self.lineno, 'no name specified in end tag')
tag = ''
k = i+2
else:
tag = res.group(0)
k = res.end(0)
if k != end.start(0):
# check that there is only white space at end of tag
res = space.match(rawdata, k)
if res is None or res.end(0) != end.start(0):
self.syntax_error(self.lineno, 'garbage in end tag')
self.finish_endtag(tag)
return end.end(0)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
self.stack.append(tag)
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- handle character reference, no need to override
def handle_charref(self, name):
try:
if name[0] == 'x':
n = string.atoi(name[1:], 16)
else:
n = string.atoi(name)
except string.atoi_error:
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
# Definition of entities -- derived classes may override
entitydefs = ENTITYDEFS
# Example -- handle entity reference, no need to override
def handle_entityref(self, name):
table = self.entitydefs
if table.has_key(name):
self.handle_data(table[name])
else:
self.unknown_entityref(name)
return
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle cdata, could be overridden
def handle_cdata(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle processing instructions, could be overridden
def handle_proc(self, name, data):
pass
# Example -- handle special instructions, could be overridden
def handle_special(self, data):
pass
# Example -- handle relatively harmless syntax errors, could be overridden
def syntax_error(self, lineno, message):
raise RuntimeError, 'Syntax error at line %d: %s' % (lineno, message)
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
# --------------------------------------------------------------------
# accelerated XML parser
class FastXMLParser:
    """XML parser that delegates low-level scanning to the sgmlop C
    accelerator; presents the same handler interface as SlowXMLParser.
    Chosen as XMLParser at import time when sgmlop is available."""
    # Interface -- initialize and reset this instance
    def __init__(self, verbose=0):
        self.verbose = verbose
        self.reset()
    # Interface -- reset this instance. Loses all unprocessed data
    def reset(self):
        self.rawdata = ''
        self.stack = []
        self.lasttag = '???'
        self.nomoretags = 0
        self.literal = 0
        self.lineno = 1
        # create the accelerator, rebind feed to its feed method, and
        # register self so its callbacks reach our handler methods
        self.parser = sgmlop.XMLParser()
        self.feed = self.parser.feed
        self.parser.register(self)
    # For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        self.nomoretags = self.literal = 1
    # For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        self.literal = 1
    # Interface -- feed some data to the parser. Call this as
    # often as you want, with as little or as much text as you
    # want (may include '\n'). (This just saves the text, all the
    # processing is done by goahead().)
    def feed(self, data): # overridden by reset
        self.parser.feed(data)
    # Interface -- handle the remaining data
    def close(self):
        try:
            self.parser.close()
        finally:
            # drop the accelerator to break the reference cycle
            self.parser = None
    # Interface -- translate references
    def translate_references(self, data):
        newdata = []
        i = 0
        while 1:
            res = ref.search(data, i)
            if res is None:
                newdata.append(data[i:])
                return string.join(newdata, '')
            if data[res.end(0) - 1] != ';':
                self.syntax_error(self.lineno,
                                  '; missing after entity/char reference')
            newdata.append(data[i:res.start(0)])
            str = res.group(1)
            if str[0] == '#':
                if str[1] == 'x':
                    newdata.append(chr(string.atoi(str[2:], 16)))
                else:
                    newdata.append(chr(string.atoi(str[1:])))
            else:
                try:
                    newdata.append(self.entitydefs[str])
                except KeyError:
                    # can't do it, so keep the entity ref in
                    newdata.append('&' + str + ';')
            i = res.end(0)
    # Internal -- finish processing of start tag
    # Return -1 for unknown tag, 1 for balanced tag
    def finish_starttag(self, tag, attrs):
        self.stack.append(tag)
        try:
            method = getattr(self, 'start_' + tag)
        except AttributeError:
            self.unknown_starttag(tag, attrs)
            return -1
        else:
            self.handle_starttag(tag, method, attrs)
            return 1
    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        if not tag:
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            if tag not in self.stack:
                try:
                    method = getattr(self, 'end_' + tag)
                except AttributeError:
                    self.unknown_endtag(tag)
                return
            found = len(self.stack)
            for i in range(found):
                if self.stack[i] == tag: found = i
        # implicitly close any elements left open above the matched tag
        while len(self.stack) > found:
            tag = self.stack[-1]
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                method = None
            if method:
                self.handle_endtag(tag, method)
            else:
                self.unknown_endtag(tag)
            del self.stack[-1]
    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        method(attrs)
    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        method()
    # Example -- handle character reference, no need to override
    def handle_charref(self, name):
        try:
            if name[0] == 'x':
                n = string.atoi(name[1:], 16)
            else:
                n = string.atoi(name)
        except string.atoi_error:
            self.unknown_charref(name)
            return
        if not 0 <= n <= 255:
            # only Latin-1 code points can be represented as a byte
            self.unknown_charref(name)
            return
        self.handle_data(chr(n))
    # Definition of entities -- derived classes may override
    entitydefs = ENTITYDEFS
    # Example -- handle entity reference, no need to override
    def handle_entityref(self, name):
        table = self.entitydefs
        if table.has_key(name):
            self.handle_data(table[name])
        else:
            self.unknown_entityref(name)
            return
    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass
    # Example -- handle cdata, could be overridden
    def handle_cdata(self, data):
        pass
    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass
    # Example -- handle processing instructions, could be overridden
    def handle_proc(self, name, data):
        pass
    # Example -- handle special instructions, could be overridden
    def handle_special(self, data):
        pass
    # Example -- handle relatively harmless syntax errors, could be overridden
    def syntax_error(self, lineno, message):
        raise RuntimeError, 'Syntax error at line %d: %s' % (lineno, message)
    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, ref): pass
#sgmlop = None
# pick a suitable parser
if sgmlop:
    XMLParser = FastXMLParser    # C accelerator available
else:
    XMLParser = SlowXMLParser    # fall back to the pure-Python scanner
# --------------------------------------------------------------------
# test stuff
class TestXMLParser(XMLParser):
    """Demonstration subclass: buffers character data and prints every
    parser event (tags, CDATA, PIs, comments, errors) to stdout."""
    def __init__(self, verbose=0):
        self.testdata = ""
        XMLParser.__init__(self, verbose)
    def handle_data(self, data):
        self.testdata = self.testdata + data
        # flush once the repr of the buffered data grows long enough
        if len(`self.testdata`) >= 70:
            self.flush()
    def flush(self):
        data = self.testdata
        if data:
            self.testdata = ""
            print 'data:', `data`
    def handle_cdata(self, data):
        self.flush()
        print 'cdata:', `data`
    def handle_proc(self, name, data):
        self.flush()
        print 'processing:',name,`data`
    def handle_special(self, data):
        self.flush()
        print 'special:',`data`
    def handle_comment(self, data):
        self.flush()
        r = `data`
        if len(r) > 68:
            # abbreviate long comments: first and last 32 characters
            r = r[:32] + '...' + r[-32:]
        print 'comment:', r
    def syntax_error(self, lineno, message):
        # report instead of raising, so parsing continues
        print 'error at line %d:' % lineno, message
    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print 'start tag: <' + tag + '>'
        else:
            print 'start tag: <' + tag,
            for name, value in attrs.items():
                print name + '=' + '"' + value + '"',
            print '>'
    def unknown_endtag(self, tag):
        self.flush()
        print 'end tag: </' + tag + '>'
    def unknown_entityref(self, ref):
        self.flush()
        print '*** unknown entity ref: &' + ref + ';'
    def unknown_charref(self, ref):
        self.flush()
        print '*** unknown char ref: &#' + ref + ';'
    def close(self):
        XMLParser.close(self)
        self.flush()
def test(args = None):
    """Command-line driver: parse a file (default 'test.xml', '-' for
    stdin) one character at a time and print parser events.  Pass '-s'
    to use the bare XMLParser instead of TestXMLParser."""
    import sys
    if not args:
        args = sys.argv[1:]
    if args and args[0] == '-s':
        args = args[1:]
        klass = XMLParser
    else:
        klass = TestXMLParser
    if args:
        file = args[0]
    else:
        file = 'test.xml'
    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError, msg:
            print file, ":", msg
            sys.exit(1)
    data = f.read()
    if f is not sys.stdin:
        f.close()
    x = klass()
    # feed one character at a time to exercise incremental parsing
    for c in data:
        x.feed(c)
    x.close()
if __name__ == '__main__': #NO_REPORTLAB_TEST
    test()
| {
"repo_name": "commtrack/temp-aquatest",
"path": "reportlab/lib/xmllib.py",
"copies": "1",
"size": "26609",
"license": "bsd-3-clause",
"hash": 8476653236130839000,
"line_mean": 32.4230271669,
"line_max": 80,
"alpha_frac": 0.4914502612,
"autogenerated": false,
"ratio": 4.149228130360206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5140678391560206,
"avg_score": null,
"num_lines": null
} |
"""A parser for XML, using the derived class as static DTD."""
# Author: Sjoerd Mullender.
import re
import string
import warnings
warnings.warn("The xmllib module is obsolete. Use xml.sax instead.",
DeprecationWarning, 2)
del warnings
version = '0.3'
class Error(RuntimeError):
    """Exception raised by this parser for fatal XML errors."""
    pass
# Regular expressions used for parsing
# --- lexical patterns used by the parser ------------------------------
_S = '[ \t\r\n]+'                       # one or more whitespace chars
_opS = '[ \t\r\n]*'                     # zero or more whitespace chars
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*'    # a valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")"        # a quoted XML string
# characters not allowed in (Latin-1) character data
illegal = re.compile('[^\t\r\n -\176\240-\377]')
interesting = re.compile('[]&<]')
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')
attrfind = re.compile(
    _S + '(?P<name>' + _Name + ')'
    '(' + _opS + '=' + _opS +
    '(?P<value>' + _QStr + r'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>' + _Name + ')'
                           '(?P<attrs>(?:' + attrfind.pattern + ')*)' +
                           starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|' + _QStr + ')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# DOCTYPE external id, one of:
#   SYSTEM SystemLiteral
#   PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>' + _QStr + ')'
_PublicLiteral = ('(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|'
                  "'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')")
_ExternalId = ('(?:SYSTEM|'
               'PUBLIC' + _S + _PublicLiteral % 'pubid' +
               ')' + _S + _SystemLiteral % 'syslit')
doctype = re.compile('<!DOCTYPE' + _S + '(?P<name>' + _Name + ')'
                     '(?:' + _S + _ExternalId + ')?' + _opS)
xmldecl = re.compile(r'<\?xml' + _S +
                     'version' + _opS + '=' + _opS +
                     '(?P<version>' + _QStr + ')'
                     '(?:' + _S + 'encoding' + _opS + '=' + _opS +
                     "(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
                     '"[A-Za-z][-A-Za-z0-9._]*"))?'
                     '(?:' + _S + 'standalone' + _opS + '=' + _opS +
                     '(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?' +
                     _opS + r'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrtrans = string.maketrans(' \r\n\t', ' ')
# Patterns used by the XML Namespaces support.
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*'    # an XML Name with no colon
ncname = re.compile(_NCName + '$')      # entire string is one NCName
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?'  # optional prefix
                   '(?P<local>' + _NCName + ')$')       # local part
xmlns = re.compile('xmlns(?::(?P<ncname>' + _NCName + '))?$')
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks).
class XMLParser:
attributes = {} # default, to be overridden
elements = {} # default, to be overridden
# parsing options, settable using keyword args in __init__
__accept_unquoted_attributes = 0
__accept_missing_endtag_name = 0
__map_case = 0
__accept_utf8 = 0
__translate_attribute_references = 1
# Interface -- initialize and reset this instance
    def __init__(self, **kw):
        """Initialize the parser.

        Recognized keyword options (each overrides a strict default):
        accept_unquoted_attributes, accept_missing_endtag_name,
        map_case, accept_utf8, translate_attribute_references.
        """
        self.__fixed = 0
        if 'accept_unquoted_attributes' in kw:
            self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
        if 'accept_missing_endtag_name' in kw:
            self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
        if 'map_case' in kw:
            self.__map_case = kw['map_case']
        if 'accept_utf8' in kw:
            self.__accept_utf8 = kw['accept_utf8']
        if 'translate_attribute_references' in kw:
            self.__translate_attribute_references = kw['translate_attribute_references']
        self.reset()
    def __fixelements(self):
        # Build self.elements from start_*/end_* methods found on the
        # instance and along the class hierarchy (compatibility path used
        # when a subclass did not override the 'elements' table).
        self.__fixed = 1
        self.elements = {}
        self.__fixdict(self.__dict__)
        self.__fixclass(self.__class__)
    def __fixclass(self, kl):
        # Recursively harvest handler methods from class kl and its bases.
        self.__fixdict(kl.__dict__)
        for k in kl.__bases__:
            self.__fixclass(k)
    def __fixdict(self, dict):
        # Record start_<tag>/end_<tag> entries from one namespace dict into
        # self.elements, keeping the first (most derived) handler found.
        for key in dict.keys():
            if key[:6] == 'start_':
                tag = key[6:]
                start, end = self.elements.get(tag, (None, None))
                if start is None:
                    self.elements[tag] = getattr(self, key), end
            elif key[:4] == 'end_':
                tag = key[4:]
                start, end = self.elements.get(tag, (None, None))
                if end is None:
                    self.elements[tag] = start, getattr(self, key)
# Interface -- reset this instance. Loses all unprocessed data
    def reset(self):
        """Reset parser state; any unprocessed buffered data is lost."""
        self.rawdata = ''
        self.stack = []
        self.nomoretags = 0
        self.literal = 0
        self.lineno = 1
        self.__at_start = 1
        self.__seen_doctype = None
        self.__seen_starttag = 0
        self.__use_namespaces = 0
        self.__namespaces = {'xml':None}   # xml is implicitly declared
        # backward compatibility hack: if elements not overridden,
        # fill it in ourselves
        if self.elements is XMLParser.elements:
            self.__fixelements()
# For derived classes only -- enter literal mode (CDATA) till EOF
    def setnomoretags(self):
        # All remaining input is treated as character data until EOF.
        self.nomoretags = self.literal = 1
# For derived classes only -- enter literal mode (CDATA)
    def setliteral(self, *args):
        # Enter literal (CDATA-like) mode; markup is passed through as data.
        self.literal = 1
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
    def feed(self, data):
        """Buffer *data* and process as much of it as possible."""
        self.rawdata = self.rawdata + data
        self.goahead(0)
# Interface -- handle the remaining data
    def close(self):
        """Process all remaining buffered data as if followed by EOF."""
        self.goahead(1)
        if self.__fixed:
            self.__fixed = 0
            # remove self.elements so that we don't leak
            del self.elements
# Interface -- translate references
    def translate_references(self, data, all = 1):
        """Return *data* with references replaced by their values.

        Character references (&#...;) are always translated; general
        entity references only when *all* is true.  Returns *data*
        unchanged when attribute-reference translation is disabled.
        """
        if not self.__translate_attribute_references:
            return data
        i = 0
        while 1:
            res = amp.search(data, i)
            if res is None:
                return data
            s = res.start(0)
            res = ref.match(data, s)
            if res is None:
                self.syntax_error("bogus `&'")
                i = s+1
                continue
            i = res.end(0)
            str = res.group(1)
            rescan = 0
            if str[0] == '#':
                if str[1] == 'x':
                    str = chr(int(str[2:], 16))
                else:
                    str = chr(int(str[1:]))
                if data[i - 1] != ';':
                    self.syntax_error("`;' missing after char reference")
                    i = i-1
            elif all:
                if str in self.entitydefs:
                    str = self.entitydefs[str]
                    # replacement text may itself contain references
                    rescan = 1
                elif data[i - 1] != ';':
                    self.syntax_error("bogus `&'")
                    i = s + 1 # just past the &
                    continue
                else:
                    self.syntax_error("reference to unknown entity `&%s;'" % str)
                    str = '&' + str + ';'
            elif data[i - 1] != ';':
                self.syntax_error("bogus `&'")
                i = s + 1 # just past the &
                continue
            # when we get here, str contains the translated text and i points
            # to the end of the string that is to be replaced
            data = data[:s] + str + data[i:]
            if rescan:
                i = s
            else:
                i = s + len(str)
# Interface - return a dictionary of all namespaces currently valid
    def getnamespace(self):
        """Return a dict of all namespace declarations currently in scope
        (accumulated from every open element on the stack)."""
        nsdict = {}
        for t, d, nst in self.stack:
            nsdict.update(d)
        return nsdict
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        """Process buffered data as far as reasonable.

        May leave state and data to be handled by a subsequent call.
        If *end* is true, all remaining data is treated as if followed
        by an EOF marker.
        """
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if i > 0:
                self.__at_start = 0
            if self.nomoretags:
                # literal-to-EOF mode: everything left is character data
                data = rawdata[i:n]
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
                i = n
                break
            res = interesting.search(rawdata, i)
            if res:
                j = res.start(0)
            else:
                j = n
            if i < j:
                # plain character data up to the next markup character
                data = rawdata[i:j]
                if self.__at_start and space.match(data) is None:
                    self.syntax_error('illegal data at start of file')
                self.__at_start = 0
                if not self.stack and space.match(data) is None:
                    self.syntax_error('data not in content')
                if not self.__accept_utf8 and illegal.search(data):
                    self.syntax_error('illegal character in content')
                self.handle_data(data)
                self.lineno = self.lineno + data.count('\n')
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    self.__seen_starttag = 1
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if endtagopen.match(rawdata, i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if commentopen.match(rawdata, i):
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    k = self.parse_comment(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                if cdataopen.match(rawdata, i):
                    k = self.parse_cdata(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = xmldecl.match(rawdata, i)
                if res:
                    if not self.__at_start:
                        self.syntax_error("<?xml?> declaration not at start of document")
                    version, encoding, standalone = res.group('version',
                                                              'encoding',
                                                              'standalone')
                    if version[1:-1] != '1.0':
                        raise Error('only XML version 1.0 supported')
                    if encoding: encoding = encoding[1:-1]
                    if standalone: standalone = standalone[1:-1]
                    self.handle_xml(encoding, standalone)
                    i = res.end(0)
                    continue
                res = procopen.match(rawdata, i)
                if res:
                    k = self.parse_proc(i)
                    if k < 0: break
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
                res = doctype.match(rawdata, i)
                if res:
                    if self.literal:
                        data = rawdata[i]
                        self.handle_data(data)
                        self.lineno = self.lineno + data.count('\n')
                        i = i+1
                        continue
                    if self.__seen_doctype:
                        self.syntax_error('multiple DOCTYPE elements')
                    if self.__seen_starttag:
                        self.syntax_error('DOCTYPE not at beginning of document')
                    k = self.parse_doctype(res)
                    if k < 0: break
                    self.__seen_doctype = res.group('name')
                    if self.__map_case:
                        self.__seen_doctype = self.__seen_doctype.lower()
                    self.lineno = self.lineno + rawdata[i:k].count('\n')
                    i = k
                    continue
            elif rawdata[i] == '&':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                res = charref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in charref")
                        i = i-1
                    if not self.stack:
                        self.syntax_error('data not in content')
                    self.handle_charref(res.group('char')[:-1])
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
                res = entityref.match(rawdata, i)
                if res is not None:
                    i = res.end(0)
                    if rawdata[i-1] != ';':
                        self.syntax_error("`;' missing in entityref")
                        i = i-1
                    name = res.group('name')
                    if self.__map_case:
                        name = name.lower()
                    if name in self.entitydefs:
                        # splice the replacement text into the buffer and
                        # rescan it (it may itself contain markup)
                        self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
                        n = len(rawdata)
                        i = res.start(0)
                    else:
                        self.unknown_entityref(name)
                    self.lineno = self.lineno + res.group(0).count('\n')
                    continue
            elif rawdata[i] == ']':
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    i = i+1
                    continue
                if n-i < 3:
                    break
                if cdataclose.match(rawdata, i):
                    self.syntax_error("bogus `]]>'")
                self.handle_data(rawdata[i])
                i = i+1
                continue
            else:
                raise Error('neither < nor & ??')
            # We get here only if incomplete matches but
            # nothing else
            break
        # end while
        if i > 0:
            self.__at_start = 0
        if end and i < n:
            # at EOF with unconsumed garbage: report it one char at a
            # time, then recurse to drain the rest of the buffer
            data = rawdata[i]
            self.syntax_error("bogus `%s'" % data)
            if not self.__accept_utf8 and illegal.search(data):
                self.syntax_error('illegal character in content')
            self.handle_data(data)
            self.lineno = self.lineno + data.count('\n')
            self.rawdata = rawdata[i+1:]
            return self.goahead(end)
        self.rawdata = rawdata[i:]
        if end:
            if not self.__seen_starttag:
                self.syntax_error('no elements in file')
            if self.stack:
                self.syntax_error('missing end tags')
                while self.stack:
                    self.finish_endtag(self.stack[-1][0])
# Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i):
        """Parse a comment starting at *i*; return its end offset, or -1
        if the comment is not yet complete in the buffer."""
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            raise Error('unexpected call to handle_comment')
        res = commentclose.search(rawdata, i+4)
        if res is None:
            return -1
        if doubledash.search(rawdata, i+4, res.start(0)):
            self.syntax_error("`--' inside comment")
        if rawdata[res.start(0)-1] == '-':
            self.syntax_error('comment cannot end in three dashes')
        if not self.__accept_utf8 and \
           illegal.search(rawdata, i+4, res.start(0)):
            self.syntax_error('illegal character in comment')
        self.handle_comment(rawdata[i+4: res.start(0)])
        return res.end(0)
    # Internal -- handle DOCTYPE tag, return length or -1 if not terminated
    def parse_doctype(self, res):
        """Parse a <!DOCTYPE ...> declaration from the match object *res*.

        Handles an optional internal subset delimited by [...], tracking
        quoting and nested markup so a ']' inside strings or nested
        declarations does not end the subset prematurely.
        """
        rawdata = self.rawdata
        n = len(rawdata)
        name = res.group('name')
        if self.__map_case:
            name = name.lower()
        pubid, syslit = res.group('pubid', 'syslit')
        if pubid is not None:
            pubid = pubid[1:-1]         # remove quotes
            pubid = ' '.join(pubid.split()) # normalize
        if syslit is not None: syslit = syslit[1:-1] # remove quotes
        j = k = res.end(0)
        if k >= n:
            # Declaration continues past the buffered data.
            return -1
        if rawdata[k] == '[':
            # Internal subset: scan to the matching ']' at nesting level 0.
            level = 0
            k = k+1
            dq = sq = 0
            while k < n:
                c = rawdata[k]
                if not sq and c == '"':
                    dq = not dq
                elif not dq and c == "'":
                    sq = not sq
                elif sq or dq:
                    # Inside a quoted literal: ignore markup characters.
                    pass
                elif level <= 0 and c == ']':
                    res = endbracket.match(rawdata, k+1)
                    if res is None:
                        return -1
                    self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
                    return res.end(0)
                elif c == '<':
                    level = level + 1
                elif c == '>':
                    level = level - 1
                    if level < 0:
                        self.syntax_error("bogus `>' in DOCTYPE")
                k = k+1
        res = endbracketfind.match(rawdata, k)
        if res is None:
            return -1
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in DOCTYPE')
        # No internal subset: report None for the data argument.
        self.handle_doctype(name, pubid, syslit, None)
        return res.end(0)
# Internal -- handle CDATA tag, return length or -1 if not terminated
def parse_cdata(self, i):
rawdata = self.rawdata
if rawdata[i:i+9] != '<![CDATA[':
raise Error('unexpected call to parse_cdata')
res = cdataclose.search(rawdata, i+9)
if res is None:
return -1
if not self.__accept_utf8 and \
illegal.search(rawdata, i+9, res.start(0)):
self.syntax_error('illegal character in CDATA')
if not self.stack:
self.syntax_error('CDATA not in content')
self.handle_cdata(rawdata[i+9:res.start(0)])
return res.end(0)
__xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
    # Internal -- handle a processing instruction tag
    def parse_proc(self, i):
        """Parse a processing instruction starting at *i*.

        Returns the consumed length, or -1 if the closing '?>' is not yet
        buffered.  An old-fashioned <?xml:namespace ...?> declaration is
        recognized specially and recorded in self.__namespaces.
        """
        rawdata = self.rawdata
        end = procclose.search(rawdata, i)
        if end is None:
            return -1
        j = end.start(0)
        if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
            self.syntax_error('illegal character in processing instruction')
        res = tagfind.match(rawdata, i+2)
        if res is None:
            raise Error('unexpected call to parse_proc')
        k = res.end(0)
        name = res.group(0)
        if self.__map_case:
            name = name.lower()
        if name == 'xml:namespace':
            # Deprecated namespace syntax: flagged as an error, but still
            # processed for compatibility.
            self.syntax_error('old-fashioned namespace declaration')
            self.__use_namespaces = -1
            # namespace declaration
            # this must come after the <?xml?> declaration (if any)
            # and before the <!DOCTYPE> (if any).
            if self.__seen_doctype or self.__seen_starttag:
                self.syntax_error('xml:namespace declaration too late in document')
            attrdict, namespace, k = self.parse_attributes(name, k, j)
            if namespace:
                self.syntax_error('namespace declaration inside namespace declaration')
            for attrname in attrdict.keys():
                if not attrname in self.__xml_namespace_attributes:
                    self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
            if not 'ns' in attrdict or not 'prefix' in attrdict:
                self.syntax_error('xml:namespace without required attributes')
            # NOTE(review): if 'prefix' is absent and syntax_error returns
            # (a lenient override), prefix is None here and ncname.match
            # would raise TypeError -- confirm intended behavior.
            prefix = attrdict.get('prefix')
            if ncname.match(prefix) is None:
                self.syntax_error('xml:namespace illegal prefix value')
                return end.end(0)
            if prefix in self.__namespaces:
                self.syntax_error('xml:namespace prefix not unique')
            self.__namespaces[prefix] = attrdict['ns']
        else:
            if name.lower() == 'xml':
                # PI targets matching 'xml' (any case) are reserved.
                self.syntax_error('illegal processing instruction target name')
            self.handle_proc(name, rawdata[k:j])
        return end.end(0)
    # Internal -- parse attributes between i and j
    def parse_attributes(self, tag, i, j):
        """Parse the attribute text of *tag* between offsets *i* and *j*.

        Returns (attrdict, namespace, i): regular attributes with entity
        references resolved, xmlns:* declarations split out into
        *namespace*, and the offset where scanning stopped.
        """
        rawdata = self.rawdata
        attrdict = {}
        namespace = {}
        while i < j:
            res = attrfind.match(rawdata, i)
            if res is None:
                break
            attrname, attrvalue = res.group('name', 'value')
            if self.__map_case:
                attrname = attrname.lower()
            i = res.end(0)
            if attrvalue is None:
                # HTML-style minimized attribute: treat name as value.
                self.syntax_error("no value specified for attribute `%s'" % attrname)
                attrvalue = attrname
            elif attrvalue[:1] == "'" == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            elif not self.__accept_unquoted_attributes:
                self.syntax_error("attribute `%s' value not quoted" % attrname)
            res = xmlns.match(attrname)
            if res is not None:
                # namespace declaration
                ncname = res.group('ncname')
                namespace[ncname or ''] = attrvalue or None
                if not self.__use_namespaces:
                    # Remember the stack depth at which namespaces began.
                    self.__use_namespaces = len(self.stack)+1
                continue
            if '<' in attrvalue:
                self.syntax_error("`<' illegal in attribute value")
            if attrname in attrdict:
                self.syntax_error("attribute `%s' specified twice" % attrname)
            attrvalue = attrvalue.translate(attrtrans)
            attrdict[attrname] = self.translate_references(attrvalue)
        return attrdict, namespace, i
    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at *i*: attributes, namespace resolution,
        attribute defaulting from self.attributes, then dispatch via
        finish_starttag (and finish_endtag for <empty/> elements).
        """
        rawdata = self.rawdata
        # i points to start of tag
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        tag = starttagmatch.match(rawdata, i)
        if tag is None or tag.end(0) != end.end(0):
            self.syntax_error('garbage in starttag')
            return end.end(0)
        nstag = tagname = tag.group('tagname')
        if self.__map_case:
            nstag = tagname = nstag.lower()
        if not self.__seen_starttag and self.__seen_doctype and \
           tagname != self.__seen_doctype:
            self.syntax_error('starttag does not match DOCTYPE')
        if self.__seen_starttag and not self.stack:
            self.syntax_error('multiple elements on top level')
        k, j = tag.span('attrs')
        attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
        self.stack.append((tagname, nsdict, nstag))
        if self.__use_namespaces:
            res = qname.match(tagname)
        else:
            res = None
        if res is not None:
            # Resolve the tag's prefix against declarations on the stack
            # (innermost declaration wins) then globals.
            prefix, nstag = res.group('prefix', 'local')
            if prefix is None:
                prefix = ''
            ns = None
            for t, d, nst in self.stack:
                if prefix in d:
                    ns = d[prefix]
            if ns is None and prefix != '':
                ns = self.__namespaces.get(prefix)
            if ns is not None:
                nstag = ns + ' ' + nstag
            elif prefix != '':
                nstag = prefix + ':' + nstag # undo split
            self.stack[-1] = tagname, nsdict, nstag
        # translate namespace of attributes
        attrnamemap = {} # map from new name to old name (used for error reporting)
        for key in attrdict.keys():
            attrnamemap[key] = key
        if self.__use_namespaces:
            nattrdict = {}
            for key, val in attrdict.items():
                okey = key
                res = qname.match(key)
                if res is not None:
                    aprefix, key = res.group('prefix', 'local')
                    if self.__map_case:
                        key = key.lower()
                    if aprefix is not None:
                        ans = None
                        for t, d, nst in self.stack:
                            if aprefix in d:
                                ans = d[aprefix]
                        if ans is None:
                            ans = self.__namespaces.get(aprefix)
                        if ans is not None:
                            key = ans + ' ' + key
                        else:
                            # Unknown prefix: restore the original form.
                            key = aprefix + ':' + key
                nattrdict[key] = val
                attrnamemap[key] = okey
            attrdict = nattrdict
        attributes = self.attributes.get(nstag)
        if attributes is not None:
            # Validate known attributes and fill in declared defaults.
            for key in attrdict.keys():
                if not key in attributes:
                    self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
            for key, val in attributes.items():
                if val is not None and not key in attrdict:
                    attrdict[key] = val
        method = self.elements.get(nstag, (None, None))[0]
        self.finish_starttag(nstag, attrdict, method)
        if tag.group('slash') == '/':
            # Empty-element tag: close it immediately.
            self.finish_endtag(tagname)
        return tag.end(0)
    # Internal -- parse endtag
    def parse_endtag(self, i):
        """Parse an end tag at *i* and dispatch via finish_endtag.

        Returns the consumed length, or -1 if the tag is incomplete.
        In literal mode, non-matching end tags are emitted as data.
        """
        rawdata = self.rawdata
        end = endbracketfind.match(rawdata, i+1)
        if end is None:
            return -1
        res = tagfind.match(rawdata, i+2)
        if res is None:
            if self.literal:
                self.handle_data(rawdata[i])
                return i+1
            if not self.__accept_missing_endtag_name:
                self.syntax_error('no name specified in end tag')
            # SGML-style </>: close the innermost open element.
            tag = self.stack[-1][0]
            k = i+2
        else:
            tag = res.group(0)
            if self.__map_case:
                tag = tag.lower()
            if self.literal:
                # In literal (CDATA-like) mode only the matching close tag
                # ends the section; everything else is character data.
                if not self.stack or tag != self.stack[-1][0]:
                    self.handle_data(rawdata[i])
                    return i+1
            k = res.end(0)
        if endbracket.match(rawdata, k) is None:
            self.syntax_error('garbage in end tag')
        self.finish_endtag(tag)
        return end.end(0)
# Internal -- finish processing of start tag
def finish_starttag(self, tagname, attrdict, method):
if method is not None:
self.handle_starttag(tagname, method, attrdict)
else:
self.unknown_starttag(tagname, attrdict)
    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        """Close *tag*, popping (and reporting as missing) any elements
        opened after it.  An empty tag closes the innermost element."""
        self.literal = 0
        if not tag:
            self.syntax_error('name-less end tag')
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            # Find the outermost... actually the *last* matching open tag.
            found = -1
            for i in range(len(self.stack)):
                if tag == self.stack[i][0]:
                    found = i
            if found == -1:
                self.syntax_error('unopened end tag')
                return
        while len(self.stack) > found:
            if found < len(self.stack) - 1:
                # Elements opened after the one being closed lack end tags.
                self.syntax_error('missing close tag for %s' % self.stack[-1][2])
            nstag = self.stack[-1][2]
            method = self.elements.get(nstag, (None, None))[1]
            if method is not None:
                self.handle_endtag(nstag, method)
            else:
                self.unknown_endtag(nstag)
            if self.__use_namespaces == len(self.stack):
                # Leaving the depth where namespace processing began.
                self.__use_namespaces = 0
            del self.stack[-1]
    # Overridable -- handle xml processing instruction
    def handle_xml(self, encoding, standalone):
        # Default: ignore the <?xml ...?> declaration.
        pass
    # Overridable -- handle DOCTYPE
    def handle_doctype(self, tag, pubid, syslit, data):
        # Default: ignore the document type declaration.
        pass
    # Overridable -- handle start tag
    def handle_starttag(self, tag, method, attrs):
        # Dispatch to the element-specific start handler with the attributes.
        method(attrs)
    # Overridable -- handle end tag
    def handle_endtag(self, tag, method):
        # Dispatch to the element-specific end handler.
        method()
# Example -- handle character reference, no need to override
def handle_charref(self, name):
try:
if name[0] == 'x':
n = int(name[1:], 16)
else:
n = int(name)
except ValueError:
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
# Definition of entities -- derived classes may override
entitydefs = {'lt': '<', # must use charref
'gt': '>',
'amp': '&', # must use charref
'quot': '"',
'apos': ''',
}
    # Example -- handle data, should be overridden
    def handle_data(self, data):
        pass
    # Example -- handle cdata, could be overridden
    def handle_cdata(self, data):
        pass
    # Example -- handle comment, could be overridden
    def handle_comment(self, data):
        pass
    # Example -- handle processing instructions, could be overridden
    def handle_proc(self, name, data):
        pass
    # Example -- handle relatively harmless syntax errors, could be overridden
    def syntax_error(self, message):
        # Default behavior is strict: any reported error aborts the parse.
        raise Error('Syntax error at line %d: %s' % (self.lineno, message))
    # To be overridden -- handlers for unknown objects
    def unknown_starttag(self, tag, attrs): pass
    def unknown_endtag(self, tag): pass
    def unknown_charref(self, ref): pass
    def unknown_entityref(self, name):
        # Unlike the tag hooks above, an unknown entity is an error by default.
        self.syntax_error("reference to unknown entity `&%s;'" % name)
class TestXMLParser(XMLParser):
    """Demo subclass used by test(): buffers character data and prints
    every parser event to stdout (Python 2 print syntax)."""
    def __init__(self, **kw):
        # Accumulates character data until flush() prints it.
        self.testdata = ""
        XMLParser.__init__(self, **kw)
    def handle_xml(self, encoding, standalone):
        self.flush()
        print 'xml: encoding =',encoding,'standalone =',standalone
    def handle_doctype(self, tag, pubid, syslit, data):
        self.flush()
        print 'DOCTYPE:',tag, repr(data)
    def handle_data(self, data):
        self.testdata = self.testdata + data
        # Keep printed lines reasonably short.
        if len(repr(self.testdata)) >= 70:
            self.flush()
    def flush(self):
        # Print and reset any buffered character data.
        data = self.testdata
        if data:
            self.testdata = ""
            print 'data:', repr(data)
    def handle_cdata(self, data):
        self.flush()
        print 'cdata:', repr(data)
    def handle_proc(self, name, data):
        self.flush()
        print 'processing:',name,repr(data)
    def handle_comment(self, data):
        self.flush()
        r = repr(data)
        # Elide the middle of very long comments.
        if len(r) > 68:
            r = r[:32] + '...' + r[-32:]
        print 'comment:', r
    def syntax_error(self, message):
        # Report instead of raising, so parsing continues.
        print 'error at line %d:' % self.lineno, message
    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print 'start tag: <' + tag + '>'
        else:
            print 'start tag: <' + tag,
            for name, value in attrs.items():
                print name + '=' + '"' + value + '"',
            print '>'
    def unknown_endtag(self, tag):
        self.flush()
        print 'end tag: </' + tag + '>'
    def unknown_entityref(self, ref):
        self.flush()
        print '*** unknown entity ref: &' + ref + ';'
    def unknown_charref(self, ref):
        self.flush()
        print '*** unknown char ref: &#' + ref + ';'
    def close(self):
        XMLParser.close(self)
        self.flush()
def test(args = None):
    """Command-line self-test: parse a file (default 'test.xml').

    Options: -s use the silent base XMLParser; -t feed the whole document
    at once and report timing (otherwise feed one character at a time).
    """
    import sys, getopt
    from time import time
    if not args:
        args = sys.argv[1:]
    opts, args = getopt.getopt(args, 'st')
    klass = TestXMLParser
    do_time = 0
    for o, a in opts:
        if o == '-s':
            klass = XMLParser
        elif o == '-t':
            do_time = 1
    if args:
        file = args[0]
    else:
        file = 'test.xml'
    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError, msg:
            print file, ":", msg
            sys.exit(1)
    data = f.read()
    if f is not sys.stdin:
        f.close()
    x = klass()
    t0 = time()
    try:
        if do_time:
            # Timed mode: feed the entire document in one call.
            x.feed(data)
            x.close()
        else:
            # Stress incremental parsing: one character per feed().
            for c in data:
                x.feed(c)
            x.close()
    except Error, msg:
        t1 = time()
        print msg
        if do_time:
            print 'total time: %g' % (t1-t0)
        sys.exit(1)
    t1 = time()
    if do_time:
        print 'total time: %g' % (t1-t0)
# Run the self-test when executed as a script.
if __name__ == '__main__':
    test()
| {
"repo_name": "diegocortassa/TACTIC",
"path": "src/context/client/tactic-api-python-4.0.api04/Lib/xmllib.py",
"copies": "25",
"size": "35795",
"license": "epl-1.0",
"hash": -8053630126297608000,
"line_mean": 36.4892473118,
"line_max": 109,
"alpha_frac": 0.4713228104,
"autogenerated": false,
"ratio": 4.187529246607394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A parser object for Psipred Ss2 files"""
from __future__ import print_function
__author__ = "Felix Simkovic & Jens Thomas"
__date__ = "13 Jan 2016"
__version__ = "0.1"
import collections
import logging
import warnings
# NOTE(review): this grabs the *root* logger (no name argument) and forces
# DEBUG globally as an import side effect -- presumably getLogger(__name__)
# was intended; confirm before changing, since callers may rely on it.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class PsipredSs2Parser(object):
    """Parser for a PSIPRED ``.ss2`` secondary-structure prediction file."""

    def __init__(self, ss2file=None):
        """Initialise the parser.

        Parameters
        ----------
        ss2file : str, optional
           Path to a PSIPRED ss2 file; when given it is parsed immediately.
        """
        # Tuple of per-residue PSIPredResidueInfo records; None until parsed.
        self.residues = None
        if ss2file:
            self.parse(ss2file)

    @property
    def secondary_structure(self):
        """The secondary structure

        Returns
        -------
        str
            The secondary structure one-letter codes, or ``None`` when
            no file has been parsed yet.
        """
        if not self.residues:
            return None
        return "".join([i.ss for i in self.residues])

    def parse(self, ss2file):
        """Parse a secondary structure file

        Parameters
        ----------
        ss2file : str
            The path to the Psipred ss2 file
        """
        PSIPredResidueInfo = collections.namedtuple(
            "PSIPredResidueInfo", ["rank", "residue", "ss", "coil", "helix", "strand"]
        )
        residues = []
        with open(ss2file, 'r') as fhin:
            for line in iter(fhin.readline, ''):
                # Skip the '#' header line and blank lines.
                if line[0] == '#' or not line.strip():
                    continue
                line = line.split()
                rank = int(line[0])
                residue = line[1]
                ss = line[2]
                coil, helix, strand = map(float, line[3:6])
                residues.append(
                    PSIPredResidueInfo(rank=rank, residue=residue, ss=ss, coil=coil, helix=helix, strand=strand)
                )
        self.residues = tuple(residues)
        return

    def check_content(self):
        """Check the secondary structure composition and log an assessment.

        Assumes :meth:`parse` has been called; raises ``TypeError``
        otherwise because ``self.residues`` is still ``None``.
        """
        H = len([i for i in self.residues if i.ss == "H"])
        E = len([i for i in self.residues if i.ss == "E"])
        # BUGFIX: these messages previously went through logging.info (the
        # root logger) instead of this module's configured logger.
        if H > 0 and E > 0:
            logger.info('Your protein is predicted to be mixed alpha beta, your chances of success are intermediate')
        if H == 0 and E > 0:
            logger.info('Your protein is predicted to be all beta, your chances of success are low')
        if H > 0 and E == 0:
            logger.info('Your protein is predicted to be all alpha, your chances of success are high')
        if H == 0 and E == 0:
            # BUGFIX: corrected the "is has no" typo in this message.
            logger.info('Your protein has no predicted secondary structure, your chances of success are low')
        return

    def checkContent(self):
        """Deprecated alias of :meth:`check_content`."""
        # BUGFIX: warnings.warn takes (message, category); the arguments
        # were reversed, which raised TypeError whenever this was called.
        warnings.warn(
            "This function will be removed in a future release - use check_content() instead",
            DeprecationWarning,
        )
        return self.check_content()

    def getSecondaryStructure(self):
        """Deprecated alias of the :attr:`secondary_structure` property."""
        # BUGFIX: message/category argument order corrected here as well.
        warnings.warn(
            "This function will be removed in a future release - use attribute secondary_structure instead",
            DeprecationWarning,
        )
        return self.secondary_structure
| {
"repo_name": "rigdenlab/ample",
"path": "ample/parsers/psipred_parser.py",
"copies": "2",
"size": "3084",
"license": "bsd-3-clause",
"hash": 4166241374481594000,
"line_mean": 29.5346534653,
"line_max": 118,
"alpha_frac": 0.5703631647,
"autogenerated": false,
"ratio": 4.134048257372654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5704411422072654,
"avg_score": null,
"num_lines": null
} |
# A parser to parse the topics and questions schema for a single Analysis Type
# and populate the database
import json
from thresher.models import *
###### EXCEPTIONS ######
########################
class TopicsSchemaParser(object):
    """
    Parses a json schema of topics and questions and populates the database
    """
    def __init__(self, topic_obj, schema, dependencies):
        """
        topic_obj: The Topic object that is the parent of subtopics in schema
        schema: A json schema as a string or loaded json with subtopics
        dependencies: The list of answers that point to another question
        """
        self.topic_obj = topic_obj
        # if the schema is a string, tries to load it as json, otherwise,
        # assumes it's already json
        # NOTE(review): the 'unicode' check makes this module Python 2-only.
        if isinstance(schema, str) or isinstance(schema, unicode):
            self.schema_json = json.loads(schema)
        else:
            self.schema_json = schema
        # ensure that the analysis_type is valid
        if not isinstance(topic_obj, Topic):
            raise ValueError("schema must be an instance of Topic model")
        self.dep = dependencies
    def load_answers(self, answers, question):
        """
        Creates the answers instances for a given question.
        answers: A list of answers
        question: The question that answers belongs to
        """
        # find the corresponding topic and question ids
        for answer_args in answers:
            # create the next question reference, it will be rewritten in
            # load_next_question
            answer_args['question'] = question
            # Create the answer in the database
            answer = Answer.objects.create(**answer_args)
    def load_questions(self, questions, topic):
        """
        Creates the questions instances for the given topic.
        questions: A list of questions
        topic: The topic that questions belongs to
        """
        for question_args in questions:
            # Create the topic
            question_args['topic'] = topic
            # Store the answers for later
            answers = question_args.pop('answers')
            # Create the Question
            question = Question.objects.create(**question_args)
            # Load the Question's answers
            self.load_answers(answers, question)
    def load_topics(self):
        """
        Loads all the topics, their questions and their answers.
        Entry point: creates every subtopic, then wires up question
        ordering and contingency dependencies.
        """
        for topic_args in self.schema_json:
            # Get the questions to add them later
            questions = topic_args.pop('questions')
            # Change id to order
            topic_args['order'] = topic_args.pop('id')
            # Set reference to parent
            topic_args['parent'] = self.topic_obj
            # Create the topic with the values in topic_args
            topic = Topic.objects.create(**topic_args)
            self.load_questions(questions, topic)
        self.load_next_question()
        self.load_dependencies()
    def load_next_question(self):
        """
        Loads all mandatory next_questions to Answer objects.
        If an answer does not point to another question, that
        signals the end. Also populates each mandatory question
        with a default next question.
        """
        topics = Topic.objects.filter(parent=self.topic_obj)
        for topic in topics:
            # Chain non-contingency questions in question_id order.
            questions = Question.objects.filter(topic=topic,
                                                contingency=False) \
                                        .order_by('question_id')
            for i in range(len(questions) - 1):
                self.write_answers(questions[i], questions[i + 1])
    def write_answers(self, curr_question, next_question):
        """
        Helper method for load_next_question.
        Writes the default next answer to the current question and its answers.
        curr_question: the curr_question to be modified
        next_question: the next_question curr_question should point to by
        default
        """
        curr_question.default_next = next_question
        curr_question.save()
        answers = Answer.objects.filter(question=curr_question)
        for answer in answers:
            answer.next_question = next_question
            answer.save()
    def load_dependencies(self):
        """
        Loads dependencies into targeted answers.
        Each dependency routes specific answers of a question to a
        contingency question, whose own answers fall through to the
        original default next question.
        """
        topics = Topic.objects.filter(parent=self.topic_obj)
        for dep in self.dep:
            # NOTE(review): 'topic' here is a QuerySet, not a single Topic;
            # passing it to filter(topic=...) relies on ORM behavior --
            # confirm this resolves as intended.
            topic = topics.filter(order=dep.topic)
            question = Question.objects.filter(topic=topic,
                                               question_id=dep.question)[0]
            answers = Answer.objects.filter(
                question=question)
            next_question = Question.objects.filter(
                topic=topic, question_id=dep.next_question)[0]
            next_question_answers = Answer.objects.filter(
                question=next_question)
            next_question.default_next = question.default_next
            next_question.save()
            # First we populate the contingency question's answers with the
            # default next answer
            for answer in next_question_answers:
                answer.next_question = next_question.default_next
                answer.save()
            # Now we point the current question's answer to the next question
            # ('*' means every answer of the question triggers it).
            if dep.answer == '*':
                answers = answers
            else:
                answers = answers.filter(answer_id=dep.answer)
            for answer in answers:
                answer.next_question = next_question
                answer.save()
| {
"repo_name": "Goodly/text-thresher-backend",
"path": "parse_schema.py",
"copies": "1",
"size": "5717",
"license": "apache-2.0",
"hash": 7913756455237670000,
"line_mean": 38.979020979,
"line_max": 79,
"alpha_frac": 0.5857967465,
"autogenerated": false,
"ratio": 4.869676320272572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5955473066772573,
"avg_score": null,
"num_lines": null
} |
'''
A tokenizer for PDF streams.
In general, documentation used was "PDF reference",
sixth edition, for PDF version 1.7, dated November 2006.
'''
from __future__ import generators
try:
set
except NameError:
from sets import Set as set
import re
from pdfobjects import PdfString, PdfObject
class _PrimitiveTokens(object):
    """Low-level PDF lexer: splits file data into whitespace runs,
    delimiters ('<<'/'>>' recognized before single chars), and everything
    in between, via a single compiled regex iterator."""
    # Table 3.1, page 50 of reference, defines whitespace
    whitespaceset = set('\x00\t\n\f\r ')
    # Text on page 50 defines delimiter characters
    delimiterset = set('()<>{}[]/%')
    # Coalesce contiguous whitespace into a single token
    whitespace_pattern = '[%s]+' % ''.join(whitespaceset)
    # In addition to the delimiters, we also use '\', which
    # is special in some contexts in PDF.
    delimiter_pattern = '\\\\|\\' + '|\\'.join(delimiterset)
    # Dictionary delimiters are '<<' and '>>'. Look for
    # these before the single variety.
    dictdelim_pattern = r'\<\<|\>\>'
    pattern = '(%s|%s|%s)' % (whitespace_pattern,
                              dictdelim_pattern, delimiter_pattern)
    re_func = re.compile(pattern).finditer
    # Keep only re_func; the pattern fragments were build-time scaffolding.
    del whitespace_pattern, dictdelim_pattern
    del delimiter_pattern, pattern
    def __init__(self, fdata):
        # MyIterator.next is a closure over this __init__'s locals; it
        # pushes the regex match (and any preceding literal text) onto
        # 'tokens' and pops one token per call.
        class MyIterator(object):
            def next():
                if not tokens:
                    startloc = self.startloc
                    for match in next_match[0]:
                        start = match.start()
                        end = match.end()
                        tappend(fdata[start:end])
                        if start > startloc:
                            # Literal text preceded the match; it is
                            # appended last so tpop() yields it first.
                            tappend(fdata[startloc:start])
                        self.startloc = end
                        break
                    else:
                        # Regex exhausted: emit any trailing literal text.
                        s = fdata[startloc:]
                        self.startloc = len(fdata)
                        if s:
                            tappend(s)
                    if not tokens:
                        raise StopIteration
                return tpop()
            next = staticmethod(next)
        self.fdata = fdata
        self.tokens = tokens = []
        self.iterator = iterator = MyIterator()
        self.next = iterator.next
        # Single-element list so setstart() can swap the match iterator
        # seen by the closure above.
        self.next_match = next_match = [None]
        tappend = tokens.append
        tpop = tokens.pop
    def setstart(self, startloc):
        # (Re)position the lexer at byte offset startloc.
        self.startloc = startloc
        self.next_match[0] = self.re_func(self.fdata, startloc)
    def __iter__(self):
        return self.iterator
    def coalesce(self, result):
        ''' This function coalesces tokens together up until
            the next delimiter or whitespace.
            All of the coalesced tokens will either be non-matches,
            or will be a matched backslash. We distinguish the
            non-matches by the fact that next() will have left
            a following match inside self.tokens for the actual match.
        '''
        tokens = self.tokens
        whitespace = self.whitespaceset
        # Optimized path for usual case -- regular data (not a name string),
        # with no escape character, and followed by whitespace.
        if tokens:
            token = tokens.pop()
            if token != '\\':
                if token[0] not in whitespace:
                    tokens.append(token)
                return
            result.append(token)
        # Non-optimized path. Either start of a name string received,
        # or we just had one escape.
        for token in self:
            if tokens:
                result.append(token)
                token = tokens.pop()
            if token != '\\':
                if token[0] not in whitespace:
                    tokens.append(token)
                return
            result.append(token)
    def floc(self):
        # Current logical file offset: startloc minus any queued tokens.
        return self.startloc - sum([len(x) for x in self.tokens])
class PdfTokens(object):
    """Higher-level PDF tokenizer: combines primitive tokens into
    strings, hex strings, names, comments and plain objects, dispatching
    on the leading delimiter character."""
    def __init__(self, fdata, startloc=0, strip_comments=True):
        # Each handler below is a closure over this __init__'s locals.
        def comment(token):
            # Consume through end-of-line; returns the comment text only
            # when comments are being kept.
            tokens = [token]
            for token in primitive:
                tokens.append(token)
                if token[0] in whitespaceset and ('\n' in token or '\r' in token):
                    break
            return not strip_comments and ''.join(tokens)
        def single(token):
            # Delimiters that are complete tokens by themselves.
            return token
        def regular_string(token):
            # (...) string: track backslash escapes and nested parens.
            def escaped():
                escaped = False
                i = -2
                while tokens[i] == '\\':
                    escaped = not escaped
                    i -= 1
                return escaped
            tokens = [token]
            nestlevel = 1
            for token in primitive:
                tokens.append(token)
                if token in '()' and not escaped():
                    nestlevel += token == '(' or -1
                    if not nestlevel:
                        break
            else:
                assert 0, "Unexpected end of token stream"
            return PdfString(''.join(tokens))
        def hex_string(token):
            tokens = [token]
            for token in primitive:
                tokens.append(token)
                if token == '>':
                    break
            # A trailing '>>' was really '>' + '>'; push the extra back.
            while tokens[-2] == '>>':
                tokens.append(tokens.pop(-2))
            return PdfString(''.join(tokens))
        def normal_data(token):
            # Obscure optimization -- we can get here with
            # whitespace or regular character data. If we get
            # here with whitespace, then there won't be an additional
            # token queued up in the primitive object, otherwise there
            # will...
            if primitive_tokens: #if token[0] not in whitespaceset:
                tokens = [token]
                primitive.coalesce(tokens)
                return PdfObject(''.join(tokens))
        def name_string(token):
            # /Name token; '#xx' pairs are hex-decoded per the PDF spec.
            tokens = [token]
            primitive.coalesce(tokens)
            token = ''.join(tokens)
            if '#' in token:
                substrs = token.split('#')
                substrs.reverse()
                tokens = [substrs.pop()]
                while substrs:
                    s = substrs.pop()
                    tokens.append(chr(int(s[:2], 16)))
                    tokens.append(s[2:])
                token = ''.join(tokens)
            return PdfObject(token)
        def broken(token):
            # Close-delimiters should never begin a token.
            assert 0, token
        dispatch = {
            '(': regular_string,
            ')': broken,
            '<': hex_string,
            '>': broken,
            '[': single,
            ']': single,
            '{': single,
            '}': single,
            '/': name_string,
            '%' : comment,
            '<<': single,
            '>>': single,
        }.get
        class MyIterator(object):
            def next():
                # Loop past handlers that return a falsy value (stripped
                # comments, bare whitespace).
                while not tokens:
                    token = primitive_next()
                    token = dispatch(token, normal_data)(token)
                    if token:
                        return token
                return tokens.pop()
            next = staticmethod(next)
        self.primitive = primitive = _PrimitiveTokens(fdata)
        self.setstart = primitive.setstart
        primitive.setstart(startloc)
        self.fdata = fdata
        self.strip_comments = strip_comments
        self.tokens = tokens = []
        self.iterator = iterator = MyIterator()
        self.next = iterator.next
        primitive_next = primitive.next
        primitive_tokens = primitive.tokens
        whitespaceset = _PrimitiveTokens.whitespaceset
    def floc(self):
        # Logical position, net of tokens queued but not yet delivered.
        return self.primitive.floc() - sum([len(x) for x in self.tokens])
    floc = property(floc)
    def __iter__(self):
        return self.iterator
    def multiple(self, count):
        # Return the next 'count' tokens as a list.
        next = self.next
        return [next() for i in range(count)]
| {
"repo_name": "ddd332/presto",
"path": "presto-docs/target/sphinx/rst2pdf/extensions/vectorpdf/pdfrw/pdftokens.py",
"copies": "3",
"size": "7963",
"license": "apache-2.0",
"hash": 1715859251694604500,
"line_mean": 30.9799196787,
"line_max": 82,
"alpha_frac": 0.5082255431,
"autogenerated": false,
"ratio": 4.6540035067212155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6662229049821216,
"avg_score": null,
"num_lines": null
} |
'''
Converts pdfrw objects into reportlab objects.
Designed for and tested with rl 2.3.
Knows too much about reportlab internals.
What can you do?
The interface to this function is through the makerl() function.
Parameters:
canv - a reportlab "canvas" (also accepts a "document")
pdfobj - a pdfrw PDF object
Returns:
A corresponding reportlab object, or if the
object is a PDF Form XObject, the name to
use with reportlab for the object.
Will recursively convert all necessary objects.
Be careful when converting a page -- if /Parent is set,
will recursively convert all pages!
Notes:
1) Original objects are annotated with a
derived_rl_obj attribute which points to the
reportlab object. This keeps multiple reportlab
objects from being generated for the same pdfobj
via repeated calls to makerl. This is great for
not putting too many objects into the
new PDF, but not so good if you are modifying
objects for different pages. Then you
need to do your own deep copying (of circular
structures). You're on your own.
2) ReportLab seems weird about FormXObjects.
They pass around a partial name instead of the
object or a reference to it. So we have to
reach into reportlab and get a number for
a unique name. I guess this is to make it
where you can combine page streams with
impunity, but that's just a guess.
3) Updated 1/23/2010 to handle multipass documents
(e.g. with a table of contents). These have
a different doc object on every pass.
'''
from reportlab.pdfbase import pdfdoc as rldocmodule
from pdfrw.objects import PdfDict, PdfArray, PdfName
# Short aliases for the reportlab internal construction classes used below.
RLStream = rldocmodule.PDFStream
RLDict = rldocmodule.PDFDictionary
RLArray = rldocmodule.PDFArray
def _makedict(rldoc, pdfobj):
    # Convert a pdfrw PdfDict (with no stream) into a reportlab dictionary,
    # wrapping it in an indirect reference when the source was indirect.
    rldict = RLDict()
    result = rldict
    if pdfobj.indirect:
        result.__RefOnly__ = 1
        result = rldoc.Reference(result)
    # Memoize before recursing so circular structures terminate.
    pdfobj.derived_rl_obj[rldoc] = result, None
    for key, value in pdfobj.iteritems():
        # pdfrw keys carry a leading '/'; reportlab wants them bare.
        rldict[key[1:]] = makerl_recurse(rldoc, value)
    return result
def _makestream(rldoc, pdfobj, xobjtype=PdfName.XObject):
    # Convert a pdfrw stream dictionary into a reportlab PDFStream.
    rldict = RLDict()
    stream = RLStream(rldict, pdfobj.stream)
    if pdfobj.Type == xobjtype:
        # Form XObjects are addressed by name inside reportlab, so mint a
        # unique one from the document's object counter.
        shortname = 'pdfrw_%s' % (rldoc.objectcounter+1)
        fullname = rldoc.getXObjectName(shortname)
    else:
        shortname = fullname = None
    result = rldoc.Reference(stream, fullname)
    # Memoize before recursing so circular structures terminate.
    pdfobj.derived_rl_obj[rldoc] = result, shortname
    for key, value in pdfobj.iteritems():
        rldict[key[1:]] = makerl_recurse(rldoc, value)
    return result
def _makearray(rldoc, pdfobj):
    # Convert a pdfrw PdfArray into a reportlab array, wrapping it in an
    # indirect reference when the source was indirect.
    rlarray = RLArray([])
    result = rlarray
    if pdfobj.indirect:
        result.__RefOnly__ = 1
        result = rldoc.Reference(result)
    # Memoize before recursing so circular structures terminate.
    pdfobj.derived_rl_obj[rldoc] = result, None
    contents = rlarray.sequence
    for value in pdfobj:
        contents.append(makerl_recurse(rldoc, value))
    return result
def _makestr(rldoc, pdfobj):
assert isinstance(pdfobj, (float, int, str)), repr(pdfobj)
return pdfobj
def makerl_recurse(rldoc, pdfobj):
    # Return the cached conversion for this document, if one exists.
    docdict = getattr(pdfobj, 'derived_rl_obj', None)
    if docdict is not None:
        cached = docdict.get(rldoc)
        if cached is not None:
            return cached[0]
    # Pick a converter by type, creating the per-document memo dict the
    # first time an object is seen.  PdfDict uses .private so the memo
    # attribute is not written into the output PDF.
    if isinstance(pdfobj, PdfDict):
        func = _makestream if pdfobj.stream is not None else _makedict
        if docdict is None:
            pdfobj.private.derived_rl_obj = {}
    elif isinstance(pdfobj, PdfArray):
        func = _makearray
        if docdict is None:
            pdfobj.derived_rl_obj = {}
    else:
        func = _makestr
    return func(rldoc, pdfobj)
def makerl(canv, pdfobj):
    # Accept either a canvas (which exposes ._doc) or a document directly.
    try:
        rldoc = canv._doc
    except AttributeError:
        rldoc = canv
    rlobj = makerl_recurse(rldoc, pdfobj)
    # Form XObjects are used by name in reportlab; other objects have no
    # name (scalars also lack derived_rl_obj entirely).
    try:
        name = pdfobj.derived_rl_obj[rldoc][1]
    except AttributeError:
        name = None
    return name or rlobj
| {
"repo_name": "lamby/pkg-pdfrw",
"path": "pdfrw/toreportlab.py",
"copies": "4",
"size": "4229",
"license": "mit",
"hash": 7467492906035797000,
"line_mean": 29.4244604317,
"line_max": 69,
"alpha_frac": 0.6552376448,
"autogenerated": false,
"ratio": 3.648835202761001,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010864400682023781,
"num_lines": 139
} |
'''
Currently, this sad little file only knows how to decompress
using the flate (zlib) algorithm. Maybe more later, but it's
not a priority for me...
'''
import zlib
from pdfrw.objects import PdfDict, PdfName
from pdfrw.errors import log
def streamobjects(mylist, isinstance=isinstance, PdfDict=PdfDict):
    # Yield only the dictionary objects that actually carry a stream.
    for candidate in mylist:
        if not isinstance(candidate, PdfDict):
            continue
        if candidate.stream is not None:
            yield candidate
def uncompress(mylist, warnings=set(), flate = PdfName.FlateDecode,
                decompress=zlib.decompressobj, isinstance=isinstance, list=list, len=len):
    # In-place flate decompression of every stream object in mylist.
    # Returns True only if every filtered stream could be decompressed.
    # NOTE: the mutable default 'warnings' set is shared across calls on
    # purpose, so each distinct message is logged only once per process.
    ok = True
    for obj in streamobjects(mylist):
        ftype = obj.Filter
        if ftype is None:
            # Already uncompressed.
            continue
        if isinstance(ftype, list) and len(ftype) == 1:
            # todo: multiple filters
            ftype = ftype[0]
        parms = obj.DecodeParms
        if ftype != flate or parms is not None:
            # Only plain /FlateDecode without parameters is supported.
            msg = 'Not decompressing: cannot use filter %s with parameters %s' % (repr(ftype), repr(parms))
            if msg not in warnings:
                warnings.add(msg)
                log.warning(msg)
            ok = False
        else:
            dco = decompress()
            error = None
            try:
                data = dco.decompress(obj.stream)
            except Exception, s:
                error = str(s)
            if error is None:
                assert not dco.unconsumed_tail
                # Non-whitespace leftovers indicate a corrupt stream.
                if dco.unused_data.strip():
                    error = 'Unconsumed compression data: %s' % repr(dco.unused_data[:20])
            if error is None:
                # Success: clear the filter and store the raw data.
                obj.Filter = None
                obj.stream = data
            else:
                log.error('%s %s' % (error, repr(obj.indirect)))
    return ok
| {
"repo_name": "ralsina/pdfrw",
"path": "pdfrw/uncompress.py",
"copies": "4",
"size": "1894",
"license": "mit",
"hash": -6176890301825510000,
"line_mean": 35.4230769231,
"line_max": 107,
"alpha_frac": 0.5760295671,
"autogenerated": false,
"ratio": 3.9789915966386555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019296908080840291,
"num_lines": 52
} |
'''
Currently, this sad little file only knows how to decompress
using the flate (zlib) algorithm. Maybe more later, but it's
not a priority for me...
'''
from __future__ import generators
try:
set
except NameError:
from sets import Set as set
import zlib
from pdfobjects import PdfDict, PdfName
def streamobjects(mylist):
    ''' Return an iterator over the PdfDicts in mylist that have
        an attached stream.
    '''
    return (item for item in mylist
            if isinstance(item, PdfDict) and item.stream is not None)
def uncompress(mylist, warnings=set()):
    ''' Decompress (in place) every flate-encoded stream object in
        mylist.  Objects with other filters or with decode parameters
        are left alone; the shared mutable default set caches messages
        so each unique complaint is printed only once.
    '''
    flate = PdfName.FlateDecode
    for obj in streamobjects(mylist):
        ftype = obj.Filter
        if ftype is None:
            # No filter -- nothing to decompress.
            continue
        if isinstance(ftype, list) and len(ftype) == 1:
            # todo: multiple filters
            ftype = ftype[0]
        parms = obj.DecodeParms
        if ftype != flate or parms is not None:
            msg = 'Not decompressing: cannot use filter %s with parameters %s' % (repr(ftype), repr(parms))
            if msg not in warnings:
                warnings.add(msg)
                print msg
        else:
            # Success: replace the stream and drop the filter marker.
            obj.stream = zlib.decompress(obj.stream)
            obj.Filter = None
def compress(mylist):
    ''' Flate-compress (in place) the streams of any unfiltered
        stream objects in mylist.  A stream is only replaced when
        the compressed form does not grow it by 30 bytes or more.
    '''
    flate_name = PdfName.FlateDecode
    for entry in streamobjects(mylist):
        if entry.Filter is not None:
            # Already filtered somehow -- leave it alone.
            continue
        raw = entry.stream
        packed = zlib.compress(raw)
        if len(packed) >= len(raw) + 30:
            continue
        entry.stream = packed
        entry.Filter = flate_name
        entry.DecodeParms = None
| {
"repo_name": "kulbirsaini/pdfrw-fork",
"path": "pdfrw/pdfcompress.py",
"copies": "3",
"size": "1656",
"license": "mit",
"hash": -2773308251459607000,
"line_mean": 28.0526315789,
"line_max": 107,
"alpha_frac": 0.6117149758,
"autogenerated": false,
"ratio": 3.789473684210526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5901188660010526,
"avg_score": null,
"num_lines": null
} |
'''
Currently, this sad little file only knows how to decompress
using the flate (zlib) algorithm. Maybe more later, but it's
not a priority for me...
'''
import zlib
from pdfrw.objects import PdfDict, PdfName
from pdfrw.errors import log
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def streamobjects(mylist, isinstance=isinstance, PdfDict=PdfDict):
    ''' Generate the stream-bearing PdfDict objects from mylist.
        (isinstance/PdfDict defaults are a local-lookup speed trick.)
    '''
    for element in mylist:
        if not isinstance(element, PdfDict):
            continue
        if element.stream is None:
            continue
        yield element
def uncompress(mylist, warnings=set(), flate=PdfName.FlateDecode,
               decompress=zlib.decompressobj, isinstance=isinstance, list=list, len=len):
    ''' Decompress (in place) every flate-encoded stream object in
        mylist, applying PNG predictors when DecodeParms requests
        them.  Returns True if everything decompressed cleanly.

        warnings is a deliberately shared mutable default caching
        messages so each complaint is logged only once; the other
        keyword defaults are local-lookup speed tricks.
    '''
    ok = True
    for obj in streamobjects(mylist):
        ftype = obj.Filter
        if ftype is None:
            # Not compressed -- nothing to do.
            continue
        if isinstance(ftype, list) and len(ftype) == 1:
            # todo: multiple filters
            ftype = ftype[0]
        parms = obj.DecodeParms
        if ftype != flate or parms is not None:
            # NOTE(review): despite the predictor code below, any
            # stream that actually has DecodeParms takes this branch
            # first -- the predictor path is only reachable when
            # parms is falsy.  Confirm intent.
            msg = 'Not decompressing: cannot use filter %s with parameters %s' % (repr(ftype), repr(parms))
            if msg not in warnings:
                warnings.add(msg)
                log.warning(msg)
            ok = False
        else:
            dco = decompress()
            error = None
            try:
                data = dco.decompress(obj.stream)
                if parms:
                    # try png predictor
                    predictor = int(parms['/Predictor']) or 1
                    # predictor 1 == no predictor
                    if predictor != 1:
                        columns = int(parms['/Columns'])
                        # PNG prediction:
                        if predictor >= 10 and predictor <= 15:
                            output = StringIO()
                            # PNG prediction can vary from row to row
                            # Each row is one filter-type byte followed
                            # by `columns` data bytes.
                            rowlen = columns + 1
                            assert len(data) % rowlen == 0
                            prev_rowdata = (0,) * rowlen
                            for row in xrange(len(data) / rowlen):
                                rowdata = [ord(x) for x in
                                    data[(row * rowlen):((row + 1) * rowlen)]]
                                filter_byte = rowdata[0]
                                if filter_byte == 0:
                                    pass
                                elif filter_byte == 1:
                                    # Sub filter: add the byte to the left.
                                    for i in xrange(2, rowlen):
                                        rowdata[i] = (rowdata[i] +
                                                      rowdata[i - 1]) % 256
                                elif filter_byte == 2:
                                    # Up filter: add the byte above.
                                    for i in xrange(1, rowlen):
                                        rowdata[i] = (rowdata[i] +
                                                      prev_rowdata[i]) % 256
                                else:
                                    # unsupported PNG filter
                                    raise Exception(('Unsupported PNG '
                                                     'filter %r') % filter_byte)
                                prev_rowdata = rowdata
                                output.write(''.join([chr(x) for x in
                                                      rowdata[1:]]))
                            data = output.getvalue()
                        else:
                            # unsupported predictor
                            raise Exception(('Unsupported flatedecode'
                                             ' predictor %r') % predictor)
            except Exception, s:
                error = str(s)
            if error is None:
                assert not dco.unconsumed_tail
                if dco.unused_data.strip():
                    error = 'Unconsumed compression data: %s' % repr(
                        dco.unused_data[:20])
            if error is None:
                # Success: store plain data and drop the filter marker.
                obj.Filter = None
                obj.stream = data
            else:
                log.error('%s %s' % (error, repr(obj.indirect)))
    return ok
| {
"repo_name": "zhzhzoo/pdfrw",
"path": "pdfrw/uncompress.py",
"copies": "1",
"size": "4280",
"license": "mit",
"hash": -8809255261579455000,
"line_mean": 41.3762376238,
"line_max": 107,
"alpha_frac": 0.4331775701,
"autogenerated": false,
"ratio": 5.041224970553593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00032701582394526374,
"num_lines": 101
} |
'''
This module contains code to build PDF "Form XObjects".
A Form XObject allows a fragment from one PDF file to be cleanly
included in another PDF file.
Reference for syntax: "Parameters for opening PDF files" from SDK 8.1
http://www.adobe.com/devnet/acrobat/pdfs/pdf_open_parameters.pdf
supported 'page=xxx', 'viewrect=<left>,<top>,<width>,<height>'
Units are in points
Reference for content: Adobe PDF reference, sixth edition, version 1.7
http://www.adobe.com/devnet/acrobat/pdfs/pdf_reference_1-7.pdf
Form xobjects discussed chapter 4.9, page 355
'''
from pdfobjects import PdfDict, PdfArray, PdfName
from pdfreader import PdfReader
class ViewInfo(object):
    ''' Instantiate ViewInfo with a uri, and it will parse out
        the filename, page, and viewrect into object attributes.

        Example uri: "file.pdf#page=3&viewrect=10,20,100,50".
    '''
    # Class-level defaults; instances overwrite what they parse.
    doc = None
    docname = None
    page = None
    viewrect = None
    def __init__(self, pageinfo='', **kw):
        # Split "name.pdf#key=value&key=value" into the document part
        # and the fragment parameters.
        pageinfo=pageinfo.split('#',1)
        if len(pageinfo) == 2:
            pageinfo[1:] = pageinfo[1].replace('&', '#').split('#')
        # If the first piece is not itself a key=value parameter,
        # treat it as the document name.
        for key in 'page viewrect'.split():
            if pageinfo[0].startswith(key+'='):
                break
        else:
            self.docname = pageinfo.pop(0)
        for item in pageinfo:
            key, value = item.split('=')
            key = key.strip()
            value = value.replace(',', ' ').split()
            if key == 'page':
                assert len(value) == 1
                setattr(self, key, int(value[0]))
            elif key == 'viewrect':
                assert len(value) == 4
                setattr(self, key, [float(x) for x in value])
            else:
                # NOTE(review): `log` is never imported in this module,
                # so reaching this branch raises NameError -- confirm.
                log.error('Unknown option: %s', key)
        # Keyword arguments override/augment the parsed attributes;
        # only already-known attribute names are accepted.
        for key, value in kw.iteritems():
            assert hasattr(self, key), key
            setattr(self, key, value)
def getrects(inheritable, pageinfo):
    ''' Given the inheritable attributes of a page and
        the desired pageinfo rectangle, return the page's
        media box and the calculated boundary (clip) box,
        both as float 4-tuples.
    '''
    media = tuple(float(value) for value in inheritable.MediaBox)
    view = pageinfo.viewrect
    if view is None:
        # No explicit view: clip to the CropBox if present, else MediaBox.
        clip = tuple(float(value) for value in (inheritable.CropBox or media))
        return media, clip
    left, bottom, right, top = media
    vx, vy, vwidth, vheight = view
    # The viewrect is given top-down from the upper-left corner.
    clip_left = left + vx
    clip_top = top - vy
    clip_right = clip_left + vwidth
    clip_bottom = clip_top - vheight
    # Never clip outside the media box.
    clip = (max(left, clip_left), max(bottom, clip_bottom),
            min(right, clip_right), min(top, clip_top))
    return media, clip
def _cache_xobj(contents, resources, mbox, bbox):
    ''' Return a cached Form XObject, or create a new one and cache it.
        The cache lives on the contents object itself, keyed by bbox.
    '''
    cache = contents.xobj_cachedict
    if cache is None:
        cache = contents.private.xobj_cachedict = {}
    cached = cache.get(bbox)
    if cached is not None:
        return cached
    # Whole-page requests and sub-page requests are built differently.
    if mbox != bbox:
        builder = _get_subpage
    else:
        builder = _get_fullpage
    xobj = PdfDict(
        builder(contents, resources, mbox, bbox),
        Type=PdfName.XObject,
        Subtype=PdfName.Form,
        FormType=1,
        BBox=PdfArray(bbox),
        )
    cache[bbox] = xobj
    return xobj
def _get_fullpage(contents, resources, mbox, bbox):
    ''' fullpage is easy: wrap the content stream in a new dict
        carrying the resources, and let _cache_xobj handle the rest.
    '''
    result = PdfDict(contents)
    result.Resources = resources
    return result
def _get_subpage(contents, resources, mbox, bbox):
    ''' subpages *could* be as easy as full pages, but we
        choose to complicate life by creating a Form XObject
        for the page, and then one that references it for
        the subpage, on the off-chance that we want multiple
        items from the page.
    '''
    fullpage = _cache_xobj(contents, resources, mbox, mbox)
    xobjects = PdfDict(FullPage=fullpage)
    return PdfDict(
        stream='/FullPage Do\n',
        Resources=PdfDict(XObject=xobjects),
        )
def pagexobj(page, viewinfo=ViewInfo(), allow_compressed=True):
    ''' pagexobj creates and returns a Form XObject for
        a given view within a page (Defaults to entire page.)

        Note: the default viewinfo is a single shared ViewInfo
        instance created at import time.
    '''
    inheritable = page.inheritable
    resources = inheritable.Resources
    mbox, bbox = getrects(inheritable, viewinfo)
    contents = page.Contents
    # Make sure the only attribute is length
    # All the filters must have been executed
    assert int(contents.Length) == len(contents.stream)
    if not allow_compressed:
        # Only the /Length key may remain on the stream dict.
        assert len([x for x in contents.iteritems()]) == 1
    return _cache_xobj(contents, resources, mbox, bbox)
def docxobj(pageinfo, doc=None, allow_compressed=True):
    ''' docxobj creates and returns an actual Form XObject.
        Can work standalone, or in conjunction with
        the CacheXObj class (below).
    '''
    if not isinstance(pageinfo, ViewInfo):
        pageinfo = ViewInfo(pageinfo)
    # Reconcile the explicit doc parameter with any doc already
    # attached to the view info, reading one from disk if neither
    # source supplies it.
    if doc is not None:
        assert pageinfo.doc is None
        pageinfo.doc = doc
    else:
        doc = pageinfo.doc
        if doc is None:
            doc = pageinfo.doc = PdfReader(pageinfo.docname,
                                           decompress=not allow_compressed)
    assert isinstance(doc, PdfReader)
    # Pages are 1-based in the uri syntax; default to page 1.
    pagenum = (pageinfo.page or 1) - 1
    return pagexobj(doc.pages[pagenum], pageinfo, allow_compressed)
class CacheXObj(object):
    ''' Use to keep from reparsing files over and over,
        and to keep from making the output too much
        bigger than it ought to be by replicating
        unnecessary object copies.
    '''

    def __init__(self, decompress=False):
        ''' Set decompress true if you need
            the Form XObjects to be decompressed.
            Will decompress what it can and scream
            about the rest.
        '''
        self.decompress = decompress
        self.cached_pdfs = {}

    def load(self, sourcename):
        ''' Load a Form XObject from a uri
        '''
        info = ViewInfo(sourcename)
        cache = self.cached_pdfs
        try:
            doc = cache[info.docname]
        except KeyError:
            doc = cache[info.docname] = PdfReader(
                info.docname, decompress=self.decompress)
        return docxobj(info, doc, allow_compressed=not self.decompress)
| {
"repo_name": "ddd332/presto",
"path": "presto-docs/target/sphinx/rst2pdf/extensions/vectorpdf/pdfrw/buildxobj.py",
"copies": "2",
"size": "6560",
"license": "apache-2.0",
"hash": -4232938430954895400,
"line_mean": 33.3455497382,
"line_max": 91,
"alpha_frac": 0.6138719512,
"autogenerated": false,
"ratio": 3.7984944991314418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5412366450331442,
"avg_score": null,
"num_lines": null
} |
'''
Objects that can occur in PDF files. The most important
objects are arrays and dicts. Either of these can be
indirect or not, and dicts could have an associated
stream.
'''
from __future__ import generators
try:
set
except NameError:
from sets import Set as set
import re
class PdfObject(str):
    # A plain PDF token (name, number, keyword, ...) represented as a
    # string.  indirect is falsy here; reader code overwrites it to
    # mark objects that were loaded as indirect objects.
    indirect = False
class PdfArray(list):
    # A PDF array.  indirect is falsy here; reader code overwrites it
    # to mark arrays that were loaded as indirect objects.
    indirect = False
class PdfName(object):
    ''' Singleton factory for PDF name tokens:  PdfName.Foo and
        PdfName('Foo') both yield PdfObject('/Foo').
    '''
    def __call__(self, name):
        return PdfObject('/' + name)

    def __getattr__(self, name):
        return self(name)

# Replace the class with its single instance; only the instance is used.
PdfName = PdfName()
class PdfString(str):
    ''' A raw PDF string token, either "(...)" (regular, with
        backslash escapes) or "<...>" (hex).  decode() converts to a
        plain string; encode() builds a PdfString from one.
    '''
    indirect = False
    # Maps escape sequences (and escaped line continuations) to their
    # replacement text; a lone backslash is simply dropped.
    unescape_dict = {'\\b':'\b', '\\f':'\f', '\\n':'\n',
                    '\\r':'\r', '\\t':'\t',
                    '\\\r\n': '', '\\\r':'', '\\\n':'',
                    '\\\\':'\\', '\\':'',
                    }
    # Splits a regular string on every escape sequence (including
    # octal escapes like \101).
    unescape_pattern = r'(\\\\|\\b|\\f|\\n|\\r|\\t|\\\r\n|\\\r|\\\n|\\[0-9]+|\\)'
    unescape_func = re.compile(unescape_pattern).split
    # Hex splitters for one-byte and two-byte characters; the trailing
    # single-digit alternative tolerates an odd final digit.
    hex_pattern = '([a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])'
    hex_func = re.compile(hex_pattern).split
    hex_pattern2 = '([a-fA-F0-9][a-fA-F0-9][a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])'
    hex_func2 = re.compile(hex_pattern2).split
    hex_funcs = hex_func, hex_func2
    # NOTE(review): duplicate of the class attribute above -- harmless.
    indirect = False
    def decode_regular(self, remap=chr):
        # Decode a "(...)" string; remap converts character codes.
        assert self[0] == '(' and self[-1] == ')'
        mylist = self.unescape_func(self[1:-1])
        result = []
        unescape = self.unescape_dict.get
        for chunk in mylist:
            chunk = unescape(chunk, chunk)
            if chunk.startswith('\\') and len(chunk) > 1:
                # Octal escape, e.g. \101.
                value = int(chunk[1:], 8)
                # FIXME: TODO: Handle unicode here
                if value > 127:
                    value = 127
                chunk = remap(value)
            if chunk:
                result.append(chunk)
        return ''.join(result)
    def decode_hex(self, remap=chr, twobytes=False):
        # Decode a "<...>" string; whitespace inside is ignored.
        data = ''.join(self.split())
        data = self.hex_funcs[twobytes](data)
        chars = data[1::2]
        other = data[0::2]
        # Splitting must leave only the angle brackets behind.
        assert other[0] == '<' and other[-1] == '>' and ''.join(other) == '<>', self
        return ''.join([remap(int(x, 16)) for x in chars])
    def decode(self, remap=chr, twobytes=False):
        # Dispatch on the opening delimiter.
        if self.startswith('('):
            return self.decode_regular(remap)
        else:
            return self.decode_hex(remap, twobytes)
    def encode(cls, source, usehex=False):
        # Build a regular "(...)" PdfString, escaping as needed.
        assert not usehex, "Not supported yet"
        if isinstance(source, unicode):
            source = source.encode('utf-8')
        else:
            source = str(source)
        source = source.replace('\\', '\\\\')
        source = source.replace('(', '\\(')
        source = source.replace(')', '\\)')
        return cls('(' +source + ')')
    encode = classmethod(encode)
class PdfDict(dict):
    ''' A PDF dictionary.  Keys are /name strings; attribute access
        maps onto key access (d.Foo <-> d['/Foo']).  A PdfDict may
        carry an associated stream and an indirect flag.
    '''
    indirect = False
    stream = None
    # Attribute names handled specially by __setattr__:
    # maps attr -> (real instance attr, also-sync-/Length?).
    _special = dict(indirect = ('indirect', False),
                    stream = ('stream', True),
                    _stream = ('stream', False),
                   )
    def __setitem__(self, name, value):
        # Only /name keys are legal; storing None deletes the key.
        assert name.startswith('/'), name
        if value is not None:
            dict.__setitem__(self, name, value)
        elif name in self:
            del self[name]
    def __init__(self, *args, **kw):
        # A single positional arg may be a mapping/iterable to copy;
        # a PdfDict source also donates its indirect flag and stream.
        if args:
            if len(args) == 1:
                args = args[0]
            self.update(args)
            if isinstance(args, PdfDict):
                self.indirect = args.indirect
                self._stream = args.stream
        for key, value in kw.iteritems():
            setattr(self, key, value)
    def __getattr__(self, name):
        # Unknown attributes read the corresponding /name key
        # (returns None when absent).
        return self.get(PdfName(name))
    def __setattr__(self, name, value):
        # Normal attributes become /name keys; entries listed in
        # _special update real instance state instead, and setting
        # 'stream' keeps the /Length key in sync with the data size.
        info = self._special.get(name)
        if info is None:
            self[PdfName(name)] = value
        else:
            name, setlen = info
            self.__dict__[name] = value
            if setlen:
                notnone = value is not None
                self.Length = notnone and PdfObject(len(value)) or None
    def iteritems(self):
        # Yield only non-None items, asserting the /name invariant.
        for key, value in dict.iteritems(self):
            if value is not None:
                assert key.startswith('/'), (key, value)
                yield key, value
    def inheritable(self):
        ''' Search through ancestors as needed for inheritable
            dictionary items
        '''
        class Search(object):
            def __init__(self, basedict):
                self.basedict = basedict
            def __getattr__(self, name):
                return self[name]
            def __getitem__(self, name):
                # Walk up the /Parent chain; the visited set guards
                # against reference cycles.
                visited = set()
                mydict = self.basedict
                while 1:
                    value = getattr(mydict, name)
                    if value is not None:
                        return value
                    myid = id(mydict)
                    assert myid not in visited
                    visited.add(myid)
                    mydict = mydict.Parent
                    if mydict is None:
                        return
        return Search(self)
    inheritable = property(inheritable)
    def private(self):
        ''' Allows setting private metadata for use in
            processing (not sent to PDF file)
        '''
        # The Private wrapper shares this dict's __dict__, so plain
        # attribute writes land there instead of becoming /name keys.
        class Private(object):
            pass
        result = Private()
        result.__dict__ = self.__dict__
        return result
    private = property(private)
class IndirectPdfDict(PdfDict):
    # Convenience subclass for dictionaries that should be treated
    # as indirect objects.
    indirect = True
| {
"repo_name": "ddd332/presto",
"path": "presto-docs/target/sphinx/rst2pdf/extensions/vectorpdf/pdfrw/pdfobjects.py",
"copies": "2",
"size": "5677",
"license": "apache-2.0",
"hash": 3358826319210814500,
"line_mean": 30.0218579235,
"line_max": 102,
"alpha_frac": 0.5085432447,
"autogenerated": false,
"ratio": 3.9044016506189823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5412944895318983,
"avg_score": null,
"num_lines": null
} |
'''
The PdfReader class reads an entire PDF file into memory and
parses the top-level container objects. (It does not parse
into streams.) The object subclasses PdfDict, and the
document pages are stored in a list in the pages attribute
of the object.
'''
import gc
import re
import os
import pickle
from pdfrw.errors import PdfParseError, log
from pdfrw.tokens import PdfTokens
from pdfrw.objects import PdfDict, PdfArray, PdfName, PdfObject, PdfIndirect
from pdfrw.uncompress import uncompress
class PdfReader(PdfDict):
    ''' Reads an entire PDF file into memory and parses the top-level
        container objects; pages end up in self.pages.
    '''
    warned_bad_stream_start = False  # Use to keep from spewing warnings
    warned_bad_stream_end = False  # Use to keep from spewing warnings
def findindirect(self, objnum, gennum, PdfIndirect=PdfIndirect, int=int):
''' Return a previously loaded indirect object, or create
a placeholder for it.
'''
key = int(objnum), int(gennum)
result = self.indirect_objects.get(key)
if result is None:
self.indirect_objects[key] = result = PdfIndirect(key)
self.deferred_objects.add(key)
result._loader = self.loadindirect
return result
    def readarray(self, source, PdfArray=PdfArray):
        ''' Found a [ token. Parse the tokens after that.
            Returns a PdfArray of the parsed elements.
        '''
        specialget = self.special.get
        result = []
        pop = result.pop
        append = result.append
        for value in source:
            if value in ']R':
                if value == ']':
                    break
                # 'R': the two previously collected tokens were an
                # indirect reference ("objnum gennum R").
                generation = pop()
                value = self.findindirect(pop(), generation)
            else:
                # Tokens like '<<' or '[' open nested constructs.
                func = specialget(value)
                if func is not None:
                    value = func(source)
            append(value)
        return PdfArray(result)
    def readdict(self, source, PdfDict=PdfDict):
        ''' Found a << token. Parse the tokens after that.
            Returns a PdfDict; malformed keys are skipped with a
            warning rather than aborting the parse.
        '''
        specialget = self.special.get
        result = PdfDict()
        next = source.next
        tok = next()
        while tok != '>>':
            if not tok.startswith('/'):
                # Just skip the incorrect /name object.
                source.warning('Expected PDF /name object')
                tok = next()
                continue
            key = tok
            value = next()
            func = specialget(value)
            if func is not None:
                # Just keep working when bad token occurs.
                if func == self.badtoken:
                    tok = value
                    continue
                value = func(source)
                tok = next()
            else:
                tok = next()
                # Two integers followed by 'R' form an indirect
                # object reference.
                if value.isdigit() and tok.isdigit():
                    if next() != 'R':
                        source.exception(
                            'Expected "R" following two integers')
                    value = self.findindirect(value, tok)
                    tok = next()
            result[key] = value
        return result
    def empty_obj(self, source, PdfObject=PdfObject):
        ''' Some silly git put an empty object in the
            file. Back up so the caller sees the endobj.
        '''
        # Rewind to the token start so 'endobj' is re-read by the caller.
        source.floc = source.tokstart
        return PdfObject()
    def badtoken(self, source):
        ''' Didn't see that coming.
            Raises via source.exception -- any delimiter dispatched
            here is invalid at the current parse position.
        '''
        source.exception('Unexpected delimiter')
    def findstream(self, obj, tok, source, PdfDict=PdfDict,
                   isinstance=isinstance, len=len):
        ''' Figure out if there is a content stream
            following an object, and return the start
            pointer to the content stream if so.
            (We can't read it yet, because we might not
            know how long it is, because Length might
            be an indirect object.)
        '''
        isdict = isinstance(obj, PdfDict)
        if not isdict or tok != 'stream':
            source.exception("Expected 'endobj'%s token",
                             isdict and " or 'stream'" or '')
        fdata = source.fdata
        startstream = source.tokstart + len(tok)
        # Skip the possible delimiters.
        possible_delimiters = ('\r', '\n', ' ')
        gotcr = gotlf = False
        while fdata[startstream] in possible_delimiters:
            if fdata[startstream] == '\r':
                gotcr = True
            if fdata[startstream] == '\n':
                gotlf = True
            startstream += 1
        if not gotlf:
            if not gotcr:
                source.warning(r'stream keyword not followed by \n')
                self.private.warned_bad_stream_start = True
            # NOTE(review): the branch above also sets the latch, so
            # once it fires the \r-without-\n warning below can never
            # fire again -- confirm this interaction is intended.
            if not self.warned_bad_stream_start:
                source.warning(r"stream keyword terminated by \r without \n")
                self.private.warned_bad_stream_start = True
        return startstream
    def readstream(self, obj, startstream, source,
                   streamending='endstream endobj'.split(), int=int):
        ''' Read the stream data that starts at startstream into
            obj._stream, reconciling the declared /Length with the
            distance to the 'endstream' keyword and repairing common
            off-by-one and truncation errors.
        '''
        fdata = source.fdata
        # Get a length by looking 'endstream'
        end_loc = fdata.find('endstream', startstream)
        possible_delimiters = ('\r', '\n', ' ')
        # Trim trailing whitespace before 'endstream' from the count.
        while fdata[end_loc-1] in possible_delimiters:
            end_loc -= 1
        observed_length = end_loc - startstream
        if obj.Length == None:
            length = observed_length
            source.warning('Lacking the stream length declaration, using the observed value %d.' % (observed_length))
        else:
            try:
                length = int(obj.Length)
            except:
                source.warning('Incorrect representation of stream length: %s. Use observed value %d instead.' % (obj.Length, observed_length))
                length = observed_length
        if length != observed_length:
            source.warning('Inconsistent stream length: %d declared, %d observed.' % (length, observed_length))
            length = observed_length
        source.floc = target_endstream = startstream + length
        endit = source.multiple(2)
        obj._stream = fdata[startstream:target_endstream]
        if endit == streamending:
            # Clean case: 'endstream endobj' exactly where expected.
            return
        # The length attribute does not match the distance between the
        # stream and endstream keywords.
        # NOTE(review): do_warn is assigned but never used below.
        do_warn, self.private.warned_bad_stream_end = (self.warned_bad_stream_end,
                                                       False)
        # TODO: Extract maxstream from dictionary of object offsets
        # and use rfind instead of find.
        maxstream = len(fdata) - 20
        endstream = fdata.find('endstream', startstream, maxstream)
        source.floc = startstream
        room = endstream - startstream
        if endstream < 0:
            source.error('Could not find endstream')
            return
        if (length == room + 1 and
            fdata[startstream - 2:startstream] == '\r\n'):
            # Off-by-one caused by counting the \r of a \r\n pair.
            source.warning(r"stream keyword terminated by \r without \n")
            obj._stream = fdata[startstream - 1:target_endstream - 1]
            return
        source.floc = endstream
        if length > room:
            source.error('stream /Length attribute (%d) appears to '
                         'be too big (size %d) -- adjusting',
                         length, room)
            obj.stream = fdata[startstream:endstream]
            return
        if fdata[target_endstream:endstream].rstrip():
            source.error('stream /Length attribute (%d) might be '
                         'smaller than data size (%d)',
                         length, room)
            obj.stream = fdata[startstream:endstream]
            return
        endobj = fdata.find('endobj', endstream, maxstream)
        if endobj < 0:
            source.error('Could not find endobj after endstream')
            return
        if fdata[endstream:endobj].rstrip() != 'endstream':
            source.error('Unexpected data between endstream and endobj')
            return
        source.error('Illegal endstream/endobj combination')
    def loadindirect(self, key):
        ''' Load (and cache) the indirect object identified by
            key = (objnum, gennum), resolving any placeholder left
            by findindirect.  Returns None if the object cannot be
            located.
        '''
        result = self.indirect_objects.get(key)
        if not isinstance(result, PdfIndirect):
            # Already resolved (or never deferred) -- return as-is.
            return result
        source = self.source
        offset = int(self.source.obj_offsets.get(key, '0'))
        if not offset:
            log.warning("Did not find PDF object %s" % (key,))
            return None
        # Read the object header and validate it
        objnum, gennum = key
        source.floc = offset
        objid = source.multiple(3)
        ok = len(objid) == 3
        ok = ok and objid[0].isdigit() and int(objid[0]) == objnum
        ok = ok and objid[1].isdigit() and int(objid[1]) == gennum
        ok = ok and objid[2] == 'obj'
        if not ok:
            # Header not at the recorded offset; scan the raw data for
            # a uniquely matching "<num> <num> obj" header instead.
            source.floc = offset
            source.next()
            objheader = '%d %d obj' % (objnum, gennum)
            fdata = source.fdata
            offset2 = (fdata.find('\n' + objheader) + 1 or
                       fdata.find('\r' + objheader) + 1)
            if (not offset2 or
                fdata.find(fdata[offset2 - 1] + objheader, offset2) > 0):
                source.warning("Expected indirect object '%s'" % objheader)
                return None
            source.warning("Indirect object %s found at incorrect "
                           "offset %d (expected offset %d)" %
                           (objheader, offset2, offset))
            source.floc = offset2 + len(objheader)
        # Read the object, and call special code if it starts
        # an array or dictionary
        obj = source.next()
        func = self.special.get(obj)
        if func is not None:
            obj = func(source)
        self.indirect_objects[key] = obj
        self.deferred_objects.remove(key)
        # Mark the object as indirect, and
        # add it to the list of streams if it starts a stream
        obj.indirect = key
        tok = source.next()
        if tok != 'endobj':
            self.readstream(obj, self.findstream(obj, tok, source), source)
        return obj
    def findxref(fdata):
        ''' Find the cross reference section at the end of a file
            Returns (startxref location, PdfTokens positioned at the
            xref table).
        '''
        startloc = fdata.rfind('startxref')
        if startloc < 0:
            raise PdfParseError('Did not find "startxref" at end of file')
        source = PdfTokens(fdata, startloc, False)
        tok = source.next()
        assert tok == 'startxref'  # (We just checked this...)
        tableloc = source.next_default()
        if not tableloc.isdigit():
            source.exception('Expected table location')
        if source.next_default().rstrip().lstrip('%') != 'EOF':
            source.exception('Expected %%EOF')
        return startloc, PdfTokens(fdata, int(tableloc), True)
    # Old-style staticmethod declaration (pre-decorator syntax).
    findxref = staticmethod(findxref)
# Parse through the byte stream when there's no xref table available.
def slow_parse_xref(self, source):
setdefault = source.obj_offsets.setdefault
add_offset = source.all_offsets.append
def get_obj_ids(fdata):
m = re.findall('\d+\s\d+\sobj', fdata, re.DOTALL)
return m
fdata = source.fdata
obj_ids = get_obj_ids(fdata)
xref = {}
cur_pos = 0
for obj_id in obj_ids:
cur_pos = fdata.find(obj_id, cur_pos)
#print obj_id, cur_pos
obj_idx_id = int(obj_id.split()[0])
obj_gen_num = int(obj_id.split()[1])
xref[obj_idx_id] = cur_pos
cur_pos += len(obj_id) # Done: Fixed a parsing bug here. "7 0 obj" and "17 o obj" are confusing before.
#print xref
for objnum,offset in xref.items():
generation = 0
setdefault((objnum, generation), offset)
add_offset(offset)
    def parsexref(self, source, int=int, range=range):
        ''' Parse (one of) the cross-reference file section(s)
            Records in-use object offsets into source.obj_offsets /
            source.all_offsets.  Falls back to a line-based repair
            pass when the table is malformed.
        '''
        fdata = source.fdata
        setdefault = source.obj_offsets.setdefault
        add_offset = source.all_offsets.append
        next = source.next
        tok = next()
        if tok != 'xref':
            source.exception('Expected "xref" keyword')
        start = source.floc
        try:
            while 1:
                tok = next()
                if tok == 'trailer':
                    # Normal termination of a well-formed table.
                    return
                # Subsection header: first objnum, then entry count.
                startobj = int(tok)
                for objnum in range(startobj, startobj + int(next())):
                    offset = int(next())
                    generation = int(next())
                    inuse = next()
                    if inuse == 'n':
                        if offset != 0:
                            setdefault((objnum, generation), offset)
                            add_offset(offset)
                    elif inuse != 'f':
                        raise ValueError
        except:
            # Fall through to the repair pass below.
            pass
        try:
            # Table formatted incorrectly. See if
            # we can figure it out anyway.
            end = source.fdata.rindex('trailer', start)
            table = source.fdata[start:end].splitlines()
            for line in table:
                tokens = line.split()
                if len(tokens) == 2:
                    # Subsection header line: "objnum count".
                    objnum = int(tokens[0])
                elif len(tokens) == 3:
                    offset, generation, inuse = (int(tokens[0]),
                        int(tokens[1]), tokens[2])
                    if offset != 0 and inuse == 'n':
                        setdefault((objnum, generation), offset)
                        add_offset(offset)
                    objnum += 1
                elif tokens:
                    log.error('Invalid line in xref table: %s' % repr(line))
                    raise ValueError
            log.warning('Badly formatted xref table')
            source.floc = end
            source.next()
        except:
            source.floc = start
            source.exception('Invalid table format')
    def readpages(self, node):
        ''' Return a flat list of /Page dictionaries by walking the
            (arbitrarily nested) /Catalog -> /Pages -> /Kids tree
            rooted at node.  Returns [] on a malformed tree.
        '''
        pagename = PdfName.Page
        pagesname = PdfName.Pages
        catalogname = PdfName.Catalog
        typename = PdfName.Type
        kidname = PdfName.Kids
        # PDFs can have arbitrarily nested Pages/Page
        # dictionary structures.
        def readnode(node):
            nodetype = node[typename]
            if nodetype == pagename:
                yield node
            elif nodetype == pagesname:
                for node in node[kidname]:
                    for node in readnode(node):
                        yield node
            elif nodetype == catalogname:
                for node in readnode(node[pagesname]):
                    yield node
            else:
                log.error('Expected /Page or /Pages dictionary, got %s' %
                          repr(node))
        try:
            return list(readnode(node))
        except (AttributeError, TypeError), s:
            log.error('Invalid page tree: %s' % s)
            return []
    def __init__(self, fname=None, fdata=None, decompress=False,
                 disable_gc=True, slow_parsing=True):
        ''' Parse a PDF from a filename/stream (fname) or raw bytes
            (fdata).  decompress flate-decodes streams after loading;
            slow_parsing scans the raw bytes for objects instead of
            trusting the xref table (note: defaults to True here).
        '''
        # Runs a lot faster with GC off.
        disable_gc = disable_gc and gc.isenabled()
        try:
            if disable_gc:
                gc.disable()
            if fname is not None:
                assert fdata is None
                # Allow reading preexisting streams like pyPdf
                if hasattr(fname, 'read'):
                    fdata = fname.read()
                else:
                    try:
                        f = open(fname, 'rb')
                        fdata = f.read()
                        f.close()
                    except IOError:
                        raise PdfParseError('Could not read PDF file %s' %
                                            fname)
            assert fdata is not None
            # Validate the %PDF- header (tolerating leading junk).
            if not fdata.startswith('%PDF-'):
                startloc = fdata.find('%PDF-')
                if startloc >= 0:
                    log.warning('PDF header not at beginning of file')
                else:
                    lines = fdata.lstrip().splitlines()
                    if not lines:
                        raise PdfParseError('Empty PDF file!')
                    raise PdfParseError('Invalid PDF header: %s' %
                                        repr(lines[0]))
            endloc = fdata.rfind('%EOF')
            if endloc < 0:
                log.error('EOF mark not found: %s' %
                          repr(fdata[-20:]))
                endloc = len(fdata) - 6
            endloc += 6
            junk = fdata[endloc:]
            # Done: It is not necessary to truncate the string.
            # Some PDFs just use wrong EOF at the end to confuse parsers.
            #fdata = fdata[:endloc]
            if junk.rstrip('\00').strip():
                log.warning('Extra data at end of file')
            # Stash parser state on the instance without polluting the
            # PDF dictionary (see PdfDict.private).
            private = self.private
            private.indirect_objects = {}
            private.deferred_objects = set()
            private.special = {'<<': self.readdict,
                               '[': self.readarray,
                               'endobj': self.empty_obj,
                               }
            for tok in r'\ ( ) < > { } ] >> %'.split():
                self.special[tok] = self.badtoken
            if slow_parsing == True:
                startloc = 0
                source = PdfTokens(fdata, startloc, True)
                private.source = source
                # Calling next() just for complete the structure of source by adding source.current.
                source.next()
                source.all_offsets = []
                source.obj_offsets = {}
                self.slow_parse_xref(source)
                # Done: add slow parsing for multiple trailers.
                # Merge every trailer dictionary found in the file.
                trailer_loc = fdata.find('trailer')
                newdict = None
                while trailer_loc >= 0:
                    source.floc = trailer_loc
                    assert source.next() == "trailer" # trailer
                    tok = source.next() # <<
                    if tok != '<<':
                        source.exception('Expected "<<" starting catalog')
                    # Ignored the corrupted trailer.
                    try:
                        tmpdict = self.readdict(source)
                    except:
                        pass
                    else:
                        if not newdict:
                            newdict = tmpdict
                        else:
                            newdict.update(tmpdict)
                    finally:
                        trailer_loc = fdata.find('trailer', trailer_loc+1)
                if newdict is not None:
                    # /Prev is meaningless after merging all trailers.
                    newdict.Prev = None
                else:
                    source.exception("No trailer.")
            else:
                startloc, source = self.findxref(fdata)
                private.source = source
                xref_table_list = []
                source.all_offsets = []
                while 1:
                    source.obj_offsets = {}
                    # Loop through all the cross-reference tables
                    self.parsexref(source)
                    tok = source.next()
                    if tok != '<<':
                        source.exception('Expected "<<" starting catalog')
                    newdict = self.readdict(source)
                    token = source.next()
                    if token != 'startxref' and not xref_table_list:
                        source.warning('Expected "startxref" at end of xref table')
                    # Loop if any previously-written tables.
                    prev = newdict.Prev
                    if prev is None:
                        break
                    if not xref_table_list:
                        # Remember the newest trailer/objects; older
                        # tables only contribute missing offsets.
                        newdict.Prev = None
                        original_indirect = self.indirect_objects.copy()
                        original_newdict = newdict
                    source.floc = int(prev)
                    xref_table_list.append(source.obj_offsets)
                    self.indirect_objects.clear()
                if xref_table_list:
                    # Apply older tables first so newer offsets win.
                    for update in reversed(xref_table_list):
                        source.obj_offsets.update(update)
                    self.indirect_objects.clear()
                    self.indirect_objects.update(original_indirect)
                    newdict = original_newdict
            self.update(newdict)
            # self.read_all_indirect(source)
            private.pages = self.readpages(self.Root)
            if decompress:
                self.uncompress()
            # For compatibility with pyPdf
            private.numPages = len(self.pages)
        finally:
            if disable_gc:
                gc.enable()
        # load the trace
        # NOTE(review): assumes fname is a path string; this raises
        # TypeError when the reader was built from fdata or a stream
        # object -- confirm intent.
        fname_trace = fname + '.trace'
        if os.path.isfile(fname_trace):
            f = open(fname_trace, 'rb')
            private.active_trace = pickle.load(f)
            f.close()
    # For compatibility with pyPdf
    def getPage(self, pagenum):
        # Zero-based page access, mirroring pyPdf's API.
        return self.pages[pagenum]
def read_all(self):
deferred = self.deferred_objects
prev = set()
while 1:
new = deferred - prev
if not new:
break
prev |= deferred
for key in new:
self.loadindirect(key)
    def uncompress(self):
        ''' Flate-decode every stream in the file in place
            (forces all deferred objects to load first).
        '''
        self.read_all()
        uncompress(self.indirect_objects.itervalues())
| {
"repo_name": "mzweilin/pdfrw",
"path": "pdfrw/pdfreader.py",
"copies": "1",
"size": "21671",
"license": "mit",
"hash": -9061637252224820000,
"line_mean": 37.4237588652,
"line_max": 143,
"alpha_frac": 0.5056527156,
"autogenerated": false,
"ratio": 4.518557130942452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5524209846542453,
"avg_score": null,
"num_lines": null
} |
'''
This file is an example parser that will parse a graphics stream
into a reportlab canvas.
Needs work on fonts and unicode, but works on a few PDFs.
Better to use Form XObjects for most things (see the example in rl1).
'''
from inspect import getargspec
import find_pdfrw
from pdfrw import PdfTokens
from pdfrw.pdfobjects import PdfString
#############################################################################
# Graphics parsing
def parse_array(self, token='[', params=None):
    ''' Collect tokens up to (but not including) the matching ']'
        and push them onto the parameter stack as a single list.
    '''
    collected = []
    for item in self.tokens:
        if item == ']':
            break
        collected.append(item)
    self.params.append(collected)
def parse_savestate(self, token='q', params=''):
    # q operator: push a copy of the current graphics state.
    self.canv.saveState()
def parse_restorestate(self, token='Q', params=''):
    # Q operator: restore the most recently saved graphics state.
    self.canv.restoreState()
# NOTE(review): the params default strings appear to declare operand
# types for dispatch machinery defined elsewhere ('a'=array, 's'=string
# per the parse_dash comment; presumably 'f'=float, 'i'=int, 'n'=name) --
# confirm against the dispatcher.
def parse_transform(self, token='cm', params='ffffff'):
    # cm: concatenate a matrix (a b c d e f) onto the CTM.
    self.canv.transform(*params)
def parse_linewidth(self, token='w', params='f'):
    self.canv.setLineWidth(*params)
def parse_linecap(self, token='J', params='i'):
    self.canv.setLineCap(*params)
def parse_linejoin(self, token='j', params='i'):
    self.canv.setLineJoin(*params)
def parse_miterlimit(self, token='M', params='f'):
    self.canv.setMiterLimit(*params)
def parse_dash(self, token='d', params='as'): # Array, string
    self.canv.setDash(*params)
def parse_intent(self, token='ri', params='n'):
    # ri (rendering intent): ignored.
    # TODO: add logging
    pass
def parse_flatness(self, token='i', params='i'):
    # i (flatness tolerance): ignored.
    # TODO: add logging
    pass
def parse_gstate(self, token='gs', params='n'):
    # gs (set state from ExtGState dict): ignored.
    # TODO: add logging
    # Could parse stuff we care about from here later
    pass
def parse_move(self, token='m', params='ff'):
if self.gpath is None:
self.gpath = self.canv.beginPath()
self.gpath.moveTo(*params)
self.current_point = params
def parse_line(self, token='l', params='ff'):
self.gpath.lineTo(*params)
self.current_point = params
def parse_curve(self, token='c', params='ffffff'):
self.gpath.curveTo(*params)
self.current_point = params[-2:]
def parse_curve1(self, token='v', params='ffff'):
parse_curve(self, token, tuple(self.current_point) + tuple(params))
def parse_curve2(self, token='y', params='ffff'):
    # PDF 'y' operator: Bezier whose second control point equals the endpoint.
    parse_curve(self, token, tuple(params) + tuple(params[-2:]))
def parse_close(self, token='h', params=''):
    # PDF 'h' operator: close the current subpath.
    self.gpath.close()
def parse_rect(self, token='re', params='ffff'):
    # PDF 're' operator: append a rectangle (x, y, width, height).
    if self.gpath is None:
        self.gpath = self.canv.beginPath()
    self.gpath.rect(*params)
    # NOTE(review): PDF sets the current point to (x, y), but this stores
    # (width, height) -- params[-2:] -- looks suspect; verify if 'v'/'y'
    # curves ever follow 're' in practice.
    self.current_point = params[-2:]
def parse_stroke(self, token='S', params=''):
    # PDF 'S' operator: stroke the path.
    finish_path(self, 1, 0, 0)
def parse_close_stroke(self, token='s', params=''):
    # PDF 's' operator: close the subpath, then stroke.
    self.gpath.close()
    finish_path(self, 1, 0, 0)
def parse_fill(self, token='f', params=''):
    # PDF 'f' operator: fill using the nonzero winding rule.
    finish_path(self, 0, 1, 1)
def parse_fill_compat(self, token='F', params=''):
    # PDF 'F' operator: obsolete synonym for 'f'.
    finish_path(self, 0, 1, 1)
def parse_fill_even_odd(self, token='f*', params=''):
    # PDF 'f*' operator: fill using the even-odd rule.
    finish_path(self, 0, 1, 0)
def parse_fill_stroke_even_odd(self, token='B*', params=''):
    # PDF 'B*' operator: fill (even-odd) and stroke.
    finish_path(self, 1, 1, 0)
def parse_fill_stroke(self, token='B', params=''):
    # PDF 'B' operator: fill (nonzero winding) and stroke.
    finish_path(self, 1, 1, 1)
def parse_close_fill_stroke_even_odd(self, token='b*', params=''):
    # PDF 'b*' operator: close the subpath, fill (even-odd) and stroke.
    self.gpath.close()
    finish_path(self, 1, 1, 0)
def parse_close_fill_stroke(self, token='b', params=''):
    # PDF 'b' operator: close the subpath, fill (nonzero) and stroke.
    self.gpath.close()
    finish_path(self, 1, 1, 1)
def parse_nop(self, token='n', params=''):
    # PDF 'n' operator: end the path without painting (used after W/W*).
    finish_path(self, 0, 0, 0)
def finish_path(self, stroke, fill, fillmode):
    """Paint the accumulated graphics path, then discard it.

    Temporarily installs *fillmode* (presumably reportlab's even-odd vs.
    nonzero-winding flag -- TODO confirm) on the canvas while drawing,
    restores the previous mode afterwards, and resets self.gpath.
    No-op when no path has been started.
    """
    if self.gpath is None:
        return
    canvas = self.canv
    saved_mode = canvas._fillMode
    canvas._fillMode = fillmode
    canvas.drawPath(self.gpath, stroke, fill)
    canvas._fillMode = saved_mode
    self.gpath = None
def parse_clip_path(self, token='W', params=''):
    # PDF 'W' operator (clip, nonzero winding): not implemented.
    # TODO: add logging
    pass
def parse_clip_path_even_odd(self, token='W*', params=''):
    # PDF 'W*' operator (clip, even-odd rule): not implemented.
    # TODO: add logging
    pass
def parse_stroke_gray(self, token='G', params='f'):
    # PDF 'G' operator: set stroke color in DeviceGray.
    self.canv.setStrokeGray(*params)
def parse_fill_gray(self, token='g', params='f'):
    # PDF 'g' operator: set fill color in DeviceGray.
    self.canv.setFillGray(*params)
def parse_stroke_rgb(self, token='RG', params='fff'):
    # PDF 'RG' operator: set stroke color in DeviceRGB.
    self.canv.setStrokeColorRGB(*params)
def parse_fill_rgb(self, token='rg', params='fff'):
    # PDF 'rg' operator: set fill color in DeviceRGB.
    self.canv.setFillColorRGB(*params)
def parse_stroke_cmyk(self, token='K', params='ffff'):
    # PDF 'K' operator: set stroke color in DeviceCMYK.
    self.canv.setStrokeColorCMYK(*params)
def parse_fill_cmyk(self, token='k', params='ffff'):
    # PDF 'k' operator: set fill color in DeviceCMYK.
    self.canv.setFillColorCMYK(*params)
#############################################################################
# Text parsing
def parse_begin_text(self, token='BT', params=''):
    # PDF 'BT' operator: open a text object; nesting is illegal, hence assert.
    assert self.tpath is None
    self.tpath = self.canv.beginText()
def parse_text_transform(self, token='Tm', params='ffffff'):
    # PDF 'Tm' operator: set the text matrix.
    path = self.tpath
    # Optimization: drop an immediately preceding identity 'Tm' from the
    # generated code so redundant transforms don't accumulate.
    # Relies on the reportlab text object's private _code list -- fragile.
    try:
        code = path._code
    except AttributeError:
        pass
    else:
        if code[-1] == '1 0 0 1 0 0 Tm':
            code.pop()
    path.setTextTransform(*params)
def parse_setfont(self, token='Tf', params='nf'):
    # PDF 'Tf' operator: select font (by resource /Name) and size.
    fontinfo = self.fontdict[params[0]]
    # Private _setFont skips reportlab's font-name validation.
    self.tpath._setFont(fontinfo.name, params[1])
    self.curfont = fontinfo
def parse_text_out(self, token='Tj', params='t'):
    # PDF 'Tj' operator: show a string, decoded via the current font's map.
    text = params[0].decode(self.curfont.remap, self.curfont.twobyte)
    self.tpath.textOut(text)
def parse_TJ(self, token='TJ', params='a'):
    # PDF 'TJ' operator: show an array of strings interleaved with numeric
    # kerning adjustments.  Strings are decoded; the numbers are currently
    # validated (int(x)) but their spacing effect is discarded.
    remap = self.curfont.remap
    twobyte = self.curfont.twobyte
    result = []
    for x in params[0]:
        if isinstance(x, PdfString):
            result.append(x.decode(remap, twobyte))
        else:
            # TODO: Adjust spacing between characters here
            int(x)
    text = ''.join(result)
    self.tpath.textOut(text)
def parse_end_text(self, token='ET', params=''):
    # PDF 'ET' operator: flush the open text object onto the canvas.
    assert self.tpath is not None
    self.canv.drawText(self.tpath)
    self.tpath=None
def parse_move_cursor(self, token='Td', params='ff'):
    # PDF 'Td' operator: move to next line offset.  The y sign is flipped
    # because reportlab's moveCursor measures downward.
    self.tpath.moveCursor(params[0], -params[1])
def parse_set_leading(self, token='TL', params='f'):
    # PDF 'TL' operator: set text leading (line spacing).
    self.tpath.setLeading(*params)
def parse_text_line(self, token='T*', params=''):
    # PDF 'T*' operator: move to the start of the next text line.
    self.tpath.textLine()
def parse_set_char_space(self, token='Tc', params='f'):
    # PDF 'Tc' operator: set character spacing.
    self.tpath.setCharSpace(*params)
def parse_set_word_space(self, token='Tw', params='f'):
    # PDF 'Tw' operator: set word spacing.
    self.tpath.setWordSpace(*params)
def parse_set_hscale(self, token='Tz', params='f'):
    # PDF 'Tz' operator gives scale as a percentage (100 = normal);
    # the -100 presumably compensates for reportlab's setHorizScale
    # adding 100 internally -- TODO confirm against reportlab source.
    self.tpath.setHorizScale(params[0] - 100)
def parse_set_rise(self, token='Ts', params='f'):
    # PDF 'Ts' operator: set text rise (baseline shift).
    self.tpath.setRise(*params)
def parse_xobject(self, token='Do', params='n'):
    # PDF 'Do' operator (paint a named XObject): not implemented.
    # TODO: Need to do this
    pass
class FontInfo(object):
    ''' Pretty basic -- needs a lot of work to work right for all fonts.

    Extracts a reportlab font name and an optional code-point remapping
    from a PDF font dictionary's /BaseFont and /ToUnicode entries.
    '''
    # Map PDF base-font names to fonts reportlab knows about.
    lookup = {
        'BitstreamVeraSans' : 'Helvetica', # WRONG -- have to learn about font stuff...
        }
    def __init__(self, source):
        # /BaseFont is a PDF name like '/Arial'; strip the leading slash.
        name = source.BaseFont[1:]
        self.name = self.lookup.get(name, name)
        # Default: identity mapping of byte values, single-byte codes.
        self.remap = chr
        self.twobyte = False
        info = source.ToUnicode
        if not info:
            return
        # Pull the code-to-unicode pairs out of the CMap's beginbfchar
        # section.  Only handles a single bfchar block -- TODO confirm
        # this is sufficient for the PDFs being targeted.
        info = info.stream.split('beginbfchar')[1].split('endbfchar')[0]
        info = list(PdfTokens(info))
        # Tokens must come in (src, dst) pairs.
        assert not len(info) & 1
        info2 = []
        for x in info:
            # Each token is a hex string like <41> (1 byte) or <0041> (2 bytes).
            assert x[0] == '<' and x[-1] == '>' and len(x) in (4,6), x
            i = int(x[1:-1], 16)
            info2.append(i)
        # Even entries are source codes, odd entries their replacements.
        self.remap = dict((x,chr(y)) for (x,y) in zip(info2[::2], info2[1::2])).get
        self.twobyte = len(info[0]) > 4
#############################################################################
# Control structures
def findparsefuncs():
    ''' Build the operator dispatch table by introspecting this module.

    Every module-level function named parse_* must have the exact
    signature (self, token=<operator>, params=<spec>), where the
    defaults act as declarative configuration: token is the PDF
    operator string, and params is a mini-spec string (one letter per
    operand) or None for self-managed handlers like parse_array.
    Returns {operator: (function, tuple_of_coercers_or_None)}.
    '''
    def checkname(n):
        # 'n' spec: operand must be a PDF name (starts with '/').
        assert n.startswith('/')
        return n
    def checkarray(a):
        # 'a' spec: operand must be a (already-collected) list.
        assert isinstance(a, list), a
        return a
    def checktext(t):
        # 't' spec: operand must be a PdfString.
        assert isinstance(t, PdfString)
        return t
    # One coercer/validator per spec letter.
    fixparam = dict(f=float, i=int, n=checkname, a=checkarray, s=str, t=checktext)
    fixcache = {}
    def fixlist(params):
        # Convert a spec string like 'ff' into a cached tuple of coercers.
        try:
            result = fixcache[params]
        except KeyError:
            result = tuple(fixparam[x] for x in params)
            fixcache[params] = result
        return result
    dispatch = {}
    expected_args = 'self token params'.split()
    # NOTE: Python 2 only (dict.iteritems).
    for key, func in globals().iteritems():
        if key.startswith('parse_'):
            # Enforce the signature convention described above.
            args, varargs, keywords, defaults = getargspec(func)
            assert args == expected_args and varargs is None \
                and keywords is None and len(defaults) == 2, \
                (key, args, varargs, keywords, defaults)
            token, params = defaults
            if params is not None:
                params = fixlist(params)
            value = func, params
            # setdefault + identity check guards against two handlers
            # claiming the same operator token.
            assert dispatch.setdefault(token, value) is value, repr(token)
    return dispatch
class _ParseClass(object):
    ''' Drives the token stream: operands are pushed onto a parameter
        stack until an operator token is seen, whose handler then
        consumes them.  (Python 2 code: print statements, iteritems.)
    '''
    dispatch = findparsefuncs()
    @classmethod
    def parsepage(cls, page, canvas=None):
        self = cls()
        contents = page.Contents
        # Only uncompressed content streams are supported.
        if contents.Filter is not None:
            raise SystemExit('Cannot parse graphics -- page encoded with %s' % contents.Filter)
        dispatch = cls.dispatch.get
        self.tokens = tokens = iter(PdfTokens(contents.stream))
        self.params = params = []
        self.canv = canvas
        self.gpath = None   # current graphics path, created lazily
        self.tpath = None   # current text object (between BT/ET)
        self.fontdict = dict((x,FontInfo(y)) for (x, y) in page.Resources.Font.iteritems())
        for token in self.tokens:
            info = dispatch(token)
            if info is None:
                # Not an operator: it's an operand, push it.
                params.append(token)
                continue
            func, paraminfo = info
            if paraminfo is None:
                # Handler manages the token stream itself (e.g. parse_array).
                func(self, token, ())
                continue
            delta = len(params) - len(paraminfo)
            if delta:
                if delta < 0:
                    # Too few operands: report, drop them, and skip the operator.
                    print 'Operator %s expected %s parameters, got %s' % (token, len(paraminfo), params)
                    params[:] = []
                    continue
                else:
                    # Extra leading junk on the stack: discard it.
                    print "Unparsed parameters/commands:", params[:delta]
                    del params[:delta]
            # Pair each coercer with its operand.  NOTE(review): zip()
            # is iterated twice below -- fine in Python 2 (list), would
            # break under Python 3 (iterator).
            paraminfo = zip(paraminfo, params)
            try:
                params[:] = [x(y) for (x,y) in paraminfo]
            except:
                # Re-run item by item so the failing operand is the one
                # that raises (easier debugging).
                for i, (x,y) in enumerate(paraminfo):
                    try:
                        x(y)
                    except:
                        raise # For now
                continue
            func(self, token, params)
            params[:] = []
def debugparser(undisturbed = set('parse_array'.split())):
    ''' Return a parsepage variant that prints every dispatched operator
        instead of executing it, except handlers listed in *undisturbed*
        (parse_array must still run, since it consumes tokens itself).
    '''
    def debugdispatch():
        def getvalue(oldval):
            name = oldval[0].__name__
            # Replacement handler: log the call instead of drawing.
            def myfunc(self, token, params):
                print '%s called %s(%s)' % (token, name, ', '.join(str(x) for x in params))
            if name in undisturbed:
                myfunc = oldval[0]
            return myfunc, oldval[1]
        # Python 2 only (iteritems).
        return dict((x, getvalue(y)) for (x,y) in _ParseClass.dispatch.iteritems())
    class _DebugParse(_ParseClass):
        dispatch = debugdispatch()
    return _DebugParse.parsepage
parsepage = _ParseClass.parsepage
# Public entry point: parse a page's content stream onto a canvas.
if __name__ == '__main__':
    # Demo driver: trace every operator on every page of the given PDF.
    import sys
    from pdfreader import PdfReader
    parse = debugparser()
    fname, = sys.argv[1:]
    pdf = PdfReader(fname)
    for i, page in enumerate(pdf.pages):
        print '\nPage %s ------------------------------------' % i
        parse(page)
| {
"repo_name": "lamby/pkg-pdfrw",
"path": "examples/rl2/decodegraphics.py",
"copies": "7",
"size": "11551",
"license": "mit",
"hash": 7086003697870987000,
"line_mean": 29.5582010582,
"line_max": 104,
"alpha_frac": 0.5855770063,
"autogenerated": false,
"ratio": 3.491837968561064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00449747732052069,
"num_lines": 378
} |
from pdfrw.objects.pdfindirect import PdfIndirect
from pdfrw.objects.pdfobject import PdfObject
def _resolved():
    # Sentinel no-op installed as PdfArray._resolve once all indirect
    # members have been resolved, so later calls cost nothing.
    pass
class PdfArray(list):
    ''' A PdfArray maps the PDF file array object into a Python list.
        It has an indirect attribute which defaults to False.

        Indirect members are resolved lazily: the first read operation
        replaces every PdfIndirect element with its real value, then
        swaps self._resolve for a no-op sentinel.
    '''
    indirect = False
    def __init__(self, source=[]):
        # The mutable default is safe here: source is only read.
        self._resolve = self._resolver
        self.extend(source)
    def _resolver(self, isinstance=isinstance, enumerate=enumerate,
                        listiter=list.__iter__,
                        PdfIndirect=PdfIndirect, resolved=_resolved,
                        PdfNull=PdfObject('null')):
        # Replace indirect references in place; missing targets become
        # the PDF null object.
        for index, value in enumerate(list.__iter__(self)):
            if isinstance(value, PdfIndirect):
                value = value.real_value()
                if value is None:
                    value = PdfNull
                self[index] = value
        # All resolved -- future calls are free.
        self._resolve = resolved
    def __getitem__(self, index, listget=list.__getitem__):
        self._resolve()
        return listget(self, index)
    def __getslice__(self, i, j, listget=list.__getslice__):
        # BUG FIX: Python 2 calls __getslice__(i, j); the old signature
        # (self, index, listget=...) bound the slice end j to listget,
        # so every a[i:j] raised TypeError.
        self._resolve()
        return listget(self, i, j)
    def __iter__(self, listiter=list.__iter__):
        self._resolve()
        return listiter(self)
    def count(self, item):
        self._resolve()
        return list.count(self, item)
    def index(self, item):
        self._resolve()
        return list.index(self, item)
    def remove(self, item):
        self._resolve()
        return list.remove(self, item)
    def sort(self, *args, **kw):
        self._resolve()
        return list.sort(self, *args, **kw)
    def pop(self, *args):
        self._resolve()
        return list.pop(self, *args)
| {
"repo_name": "tajtiattila/pdfrw",
"path": "pdfrw/objects/pdfarray.py",
"copies": "4",
"size": "1905",
"license": "mit",
"hash": 2203348472656169200,
"line_mean": 31.2881355932,
"line_max": 69,
"alpha_frac": 0.5769028871,
"autogenerated": false,
"ratio": 4.044585987261146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005423028232317755,
"num_lines": 59
} |
from pdfrw.objects.pdfindirect import PdfIndirect
from pdfrw.objects.pdfobject import PdfObject
def _resolved():
    # Sentinel no-op installed as PdfArray._resolve once all indirect
    # members have been resolved, so later calls cost nothing.
    pass
class PdfArray(list):
    ''' A PdfArray maps the PDF file array object into a Python list.
        It has an indirect attribute which defaults to False.

        Indirect members are resolved lazily: the first read operation
        replaces every PdfIndirect element with its real value, then
        swaps self._resolve for the _resolved no-op sentinel.
    '''
    indirect = False
    def __init__(self, source=[]):
        # Mutable default is safe: source is only read (extend).
        self._resolve = self._resolver
        self.extend(source)
    def _resolver(self, isinstance=isinstance, enumerate=enumerate,
                  listiter=list.__iter__, PdfIndirect=PdfIndirect,
                  resolved=_resolved, PdfNull=PdfObject('null')):
        # Defaults bind globals at def time for speed and so resolution
        # works even during interpreter shutdown.
        for index, value in enumerate(list.__iter__(self)):
            if isinstance(value, PdfIndirect):
                value = value.real_value()
                if value is None:
                    # Missing target: substitute the PDF null object.
                    value = PdfNull
                self[index] = value
        self._resolve = resolved
    def __getitem__(self, index, listget=list.__getitem__):
        self._resolve()
        return listget(self, index)
    def __getslice__(self, i, j, listget=list.__getslice__):
        # Python 2 only; Python 3 routes slices through __getitem__.
        self._resolve()
        return listget(self, i, j)
    def __iter__(self, listiter=list.__iter__):
        self._resolve()
        return listiter(self)
    def count(self, item):
        self._resolve()
        return list.count(self, item)
    def index(self, item):
        self._resolve()
        return list.index(self, item)
    def remove(self, item):
        self._resolve()
        return list.remove(self, item)
    def sort(self, *args, **kw):
        self._resolve()
        return list.sort(self, *args, **kw)
    def pop(self, *args):
        self._resolve()
        return list.pop(self, *args)
| {
"repo_name": "mzweilin/pdfrw",
"path": "pdfrw/objects/pdfarray.py",
"copies": "1",
"size": "1873",
"license": "mit",
"hash": 1893097362119835400,
"line_mean": 28.265625,
"line_max": 69,
"alpha_frac": 0.5835557928,
"autogenerated": false,
"ratio": 3.951476793248945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5035032586048945,
"avg_score": null,
"num_lines": null
} |
from pdfrw.objects.pdfname import PdfName
from pdfrw.objects.pdfindirect import PdfIndirect
from pdfrw.objects.pdfobject import PdfObject
import copy
class _DictSearch(object):
    ''' Used to search for inheritable attributes.

        Wraps a PdfDict and walks up the /Parent chain until a
        non-None value for the requested key is found.
    '''
    def __init__(self, basedict):
        self.basedict = basedict
    def __getattr__(self, name, PdfName=PdfName):
        # Attribute access becomes a '/Name' key lookup.
        return self[PdfName(name)]
    def __getitem__(self, name, set=set, getattr=getattr, id=id):
        visited = set()
        mydict = self.basedict
        while 1:
            value = mydict[name]
            if value is not None:
                return value
            # Guard against /Parent cycles in malformed files.
            myid = id(mydict)
            assert myid not in visited
            visited.add(myid)
            mydict = mydict.Parent
            if mydict is None:
                # Reached the root without finding the key.
                return
class _Private(object):
''' Used to store private attributes (not output to PDF files)
on PdfDict classes
'''
def __init__(self, pdfdict):
vars(self)['pdfdict'] = pdfdict
def __setattr__(self, name, value):
vars(self.pdfdict)[name] = value
class PdfDict(dict):
    ''' PdfDict objects are subclassed dictionaries
        with the following features:
        - Every key in the dictionary starts with "/"
        - A dictionary item can be deleted by assigning it to None
        - Keys that (after the initial "/") conform to Python
          naming conventions can also be accessed (set and retrieved)
          as attributes of the dictionary.  E.g. mydict.Page is the
          same thing as mydict['/Page']
        - Private attributes (not in the PDF space) can be set
          on the dictionary object attribute dictionary by using
          the private attribute:
                mydict.private.foo = 3
                mydict.foo = 5
                x = mydict.foo       # x will now contain 3
                y = mydict['/foo']   # y will now contain 5
          Most standard adobe dictionary keys start with an upper case letter,
          so to avoid conflicts, it is best to start private attributes with
          lower case letters.
        - PdfDicts have the following read-only properties:
            - private -- as discussed above, provides write access to
              dictionary's attributes
            - inheritable -- this creates and returns a "view" attribute
              that will search through the object hierarchy for
              any desired attribute, such as /Rotate or /MediaBox
        - PdfDicts also have the following special attributes:
            - indirect is not stored in the PDF dictionary, but in the object's
              attribute dictionary
            - stream is also stored in the object's attribute dictionary
              and will also update the stream length.
            - _stream will store in the object's attribute dictionary without
              updating the stream length.
          It is possible, for example, to have a PDF name such as "/indirect"
          or "/stream", but you cannot access such a name as an attribute:
              mydict.indirect -- accesses object's attribute dictionary
              mydict["/indirect"] -- accesses actual PDF dictionary
    '''
    indirect = False
    stream = None
    # Maps special attribute names -> (storage name, update Length?).
    _special = dict(indirect=('indirect', False),
                    stream=('stream', True),
                    _stream=('stream', False),
                   )
    def __setitem__(self, name, value, setter=dict.__setitem__):
        assert name.startswith('/'), name
        if value is not None:
            setter(self, name, value)
        elif name in self:
            # Assigning None deletes the entry (see class docstring).
            del self[name]
    def __init__(self, *args, **kw):
        if args:
            if len(args) == 1:
                args = args[0]
            self.update(args)
            if isinstance(args, PdfDict):
                # Preserve the source's special attributes on copy.
                self.indirect = args.indirect
                self._stream = args.stream
        for key, value in kw.iteritems():
            setattr(self, key, value)
    def __getattr__(self, name, PdfName=PdfName):
        ''' If the attribute doesn't exist on the dictionary object,
            try to slap a '/' in front of it and get it out
            of the actual dictionary itself.
        '''
        return self.get(PdfName(name))
    def get(self, key, dictget=dict.get, isinstance=isinstance,
                PdfIndirect=PdfIndirect):
        ''' Get a value out of the dictionary,
            after resolving any indirect objects.
        '''
        value = dictget(self, key)
        # 'is None' instead of '== None': avoids invoking __eq__ on
        # PDF objects and is the correct identity test.
        if value is None:
            # Raising AttributeError for dunder-style probes keeps the
            # object pickleable: pickle looks for __reduce__ etc. via
            # __getattr__, which funnels into here.
            if key.startswith('__') or key.startswith('/__'):
                raise AttributeError(key)
        if isinstance(value, PdfIndirect):
            # Cache the resolved value so it is only fetched once.
            self[key] = value = value.real_value()
        return value
    def __getitem__(self, key):
        return self.get(key)
    def __setattr__(self, name, value, special=_special.get,
                    PdfName=PdfName, vars=vars):
        ''' Set an attribute on the dictionary.  Handle the keywords
            indirect, stream, and _stream specially (for content objects)
        '''
        info = special(name)
        if info is None:
            self[PdfName(name)] = value
        else:
            name, setlen = info
            vars(self)[name] = value
            if setlen:
                # Keep /Length in sync with the stream contents.
                notnone = value is not None
                self.Length = notnone and PdfObject(len(value)) or None
    def iteritems(self, dictiter=dict.iteritems,
                  isinstance=isinstance, PdfIndirect=PdfIndirect):
        ''' Iterate over the dictionary, resolving any unresolved objects
        '''
        for key, value in list(dictiter(self)):
            if isinstance(value, PdfIndirect):
                self[key] = value = value.real_value()
            if value is not None:
                assert key.startswith('/'), (key, value)
                yield key, value
    def items(self):
        return list(self.iteritems())
    def itervalues(self):
        for key, value in self.iteritems():
            yield value
    def values(self):
        return list((value for key, value in self.iteritems()))
    def keys(self):
        return list((key for key, value in self.iteritems()))
    def __iter__(self):
        for key, value in self.iteritems():
            yield key
    def iterkeys(self):
        return iter(self)
    def copy(self):
        return type(self)(self)
    def pop(self, key):
        value = self.get(key)
        del self[key]
        return value
    def popitem(self):
        # BUG FIX: dict.pop requires a key argument, so the old
        # dict.pop(self) always raised TypeError.  Removing an
        # arbitrary item is dict.popitem.
        key, value = dict.popitem(self)
        if isinstance(value, PdfIndirect):
            value = value.real_value()
        return value
    def inheritable(self):
        ''' Search through ancestors as needed for inheritable
            dictionary items.
            NOTE: You might think it would be a good idea
            to cache this class, but then you'd have to worry
            about it pointing to the wrong dictionary if you
            made a copy of the object...
        '''
        return _DictSearch(self)
    inheritable = property(inheritable)
    def private(self):
        ''' Allows setting private metadata for use in
            processing (not sent to PDF file).
            See note on inheritable
        '''
        return _Private(self)
    private = property(private)
    def __deepcopy__(self, memo):
        result = PdfDict()
        memo[id(self)] = result
        for k,v in self.items():
            result[copy.deepcopy(k, memo)] = copy.deepcopy(v, memo)
        result.stream = self.stream
        result.indirect = self.indirect
        # 'is not None' avoids invoking __eq__ on PDF objects; passing
        # memo preserves shared-reference structure (the old call
        # dropped it).
        if self.active_trace is not None:
            result.active_trace = copy.deepcopy(self.active_trace, memo)
        return result
class IndirectPdfDict(PdfDict):
    ''' IndirectPdfDict is a convenience class.  You could
        create a direct PdfDict and then set indirect = True on it,
        or you could just create an IndirectPdfDict.
    '''
    # Class attribute shadows PdfDict.indirect (False).
    indirect = True
| {
"repo_name": "mzweilin/pdfrw",
"path": "pdfrw/objects/pdfdict.py",
"copies": "1",
"size": "8239",
"license": "mit",
"hash": 5301754810966906000,
"line_mean": 32.6285714286,
"line_max": 79,
"alpha_frac": 0.577740017,
"autogenerated": false,
"ratio": 4.468004338394794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005197486643036001,
"num_lines": 245
} |
from pdfrw.objects.pdfname import PdfName
from pdfrw.objects.pdfindirect import PdfIndirect
from pdfrw.objects.pdfobject import PdfObject
class _DictSearch(object):
    ''' Used to search for inheritable attributes.

        Wraps a PdfDict and walks up the /Parent chain until a
        non-None value for the requested key is found.
    '''
    def __init__(self, basedict):
        self.basedict = basedict
    def __getattr__(self, name, PdfName=PdfName):
        # Attribute access becomes a '/Name' key lookup.
        return self[PdfName(name)]
    def __getitem__(self, name, set=set, getattr=getattr, id=id):
        visited = set()
        mydict = self.basedict
        while 1:
            value = mydict[name]
            if value is not None:
                return value
            # Guard against /Parent cycles in malformed files.
            myid = id(mydict)
            assert myid not in visited
            visited.add(myid)
            mydict = mydict.Parent
            if mydict is None:
                # Reached the root without finding the key.
                return
class _Private(object):
''' Used to store private attributes (not output to PDF files)
on PdfDict classes
'''
def __init__(self, pdfdict):
vars(self)['pdfdict'] = pdfdict
def __setattr__(self, name, value):
vars(self.pdfdict)[name] = value
class PdfDict(dict):
    ''' PdfDict objects are subclassed dictionaries with the following features:
        - Every key in the dictionary starts with "/"
        - A dictionary item can be deleted by assigning it to None
        - Keys that (after the initial "/") conform to Python naming conventions
          can also be accessed (set and retrieved) as attributes of the dictionary.
          E.g.  mydict.Page is the same thing as mydict['/Page']
        - Private attributes (not in the PDF space) can be set on the dictionary
          object attribute dictionary by using the private attribute:
                mydict.private.foo = 3
                mydict.foo = 5
                x = mydict.foo       # x will now contain 3
                y = mydict['/foo']   # y will now contain 5
          Most standard adobe dictionary keys start with an upper case letter,
          so to avoid conflicts, it is best to start private attributes with
          lower case letters.
        - PdfDicts have the following read-only properties:
            - private -- as discussed above, provides write access to dictionary's
              attributes
            - inheritable -- this creates and returns a "view" attribute that
              will search through the object hierarchy for any desired
              attribute, such as /Rotate or /MediaBox
        - PdfDicts also have the following special attributes:
            - indirect is not stored in the PDF dictionary, but in the object's
              attribute dictionary
            - stream is also stored in the object's attribute dictionary
              and will also update the stream length.
            - _stream will store in the object's attribute dictionary without
              updating the stream length.
          It is possible, for example, to have a PDF name such as "/indirect"
          or "/stream", but you cannot access such a name as an attribute:
              mydict.indirect -- accesses object's attribute dictionary
              mydict["/indirect"] -- accesses actual PDF dictionary
    '''
    indirect = False
    stream = None
    # Maps special attribute names -> (storage name, update Length?).
    _special = dict(indirect=('indirect', False),
                    stream=('stream', True),
                    _stream=('stream', False),
                    )
    # Characters that may not appear inside a PDF name (PDF delimiters
    # and whitespace); enforced on every key insert.
    whitespace = '\x00 \t\f'
    delimiters = r'()<>{}[\]/%'
    forbidden = whitespace + delimiters
    def __setitem__(self, name, value, setter=dict.__setitem__):
        assert name.startswith('/'), name
        assert not any((c in self.forbidden) for c in name[1:]), name
        if value is not None:
            setter(self, name, value)
        elif name in self:
            # Assigning None deletes the entry (see class docstring).
            del self[name]
    def __init__(self, *args, **kw):
        if args:
            if len(args) == 1:
                args = args[0]
            self.update(args)
            if isinstance(args, PdfDict):
                # Preserve the source's special attributes on copy.
                self.indirect = args.indirect
                self._stream = args.stream
        for key, value in kw.iteritems():
            setattr(self, key, value)
    def __getattr__(self, name, PdfName=PdfName):
        ''' If the attribute doesn't exist on the dictionary object,
            try to slap a '/' in front of it and get it out
            of the actual dictionary itself.
        '''
        return self.get(PdfName(name))
    def get(self, key, dictget=dict.get, isinstance=isinstance,
            PdfIndirect=PdfIndirect):
        ''' Get a value out of the dictionary, after resolving any indirect
            objects.
        '''
        value = dictget(self, key)
        if isinstance(value, PdfIndirect):
            # Cache the resolved value so it is only fetched once.
            self[key] = value = value.real_value()
        return value
    def __getitem__(self, key):
        return self.get(key)
    def __setattr__(self, name, value, special=_special.get, PdfName=PdfName,
                    vars=vars):
        ''' Set an attribute on the dictionary.  Handle the keywords
            indirect, stream, and _stream specially (for content objects)
        '''
        info = special(name)
        if info is None:
            self[PdfName(name)] = value
        else:
            name, setlen = info
            vars(self)[name] = value
            if setlen:
                # Keep /Length in sync with the stream contents.
                notnone = value is not None
                self.Length = notnone and PdfObject(len(value)) or None
    def iteritems(self, dictiter=dict.iteritems, isinstance=isinstance,
                  PdfIndirect=PdfIndirect):
        ''' Iterate over the dictionary, resolving any unresolved objects
        '''
        for key, value in list(dictiter(self)):
            if isinstance(value, PdfIndirect):
                self[key] = value = value.real_value()
            if value is not None:
                assert key.startswith('/'), (key, value)
                yield key, value
    def items(self):
        return list(self.iteritems())
    def itervalues(self):
        for key, value in self.iteritems():
            yield value
    def values(self):
        return list((value for key, value in self.iteritems()))
    def keys(self):
        return list((key for key, value in self.iteritems()))
    def __iter__(self):
        for key, value in self.iteritems():
            yield key
    def iterkeys(self):
        return iter(self)
    def copy(self):
        return type(self)(self)
    def pop(self, key):
        value = self.get(key)
        del self[key]
        return value
    def popitem(self):
        # BUG FIX: dict.pop requires a key argument, so the old
        # dict.pop(self) always raised TypeError.  Removing an
        # arbitrary item is dict.popitem.
        key, value = dict.popitem(self)
        if isinstance(value, PdfIndirect):
            value = value.real_value()
        return value
    def inheritable(self):
        ''' Search through ancestors as needed for inheritable
            dictionary items.
            NOTE: You might think it would be a good idea
            to cache this class, but then you'd have to worry
            about it pointing to the wrong dictionary if you
            made a copy of the object...
        '''
        return _DictSearch(self)
    inheritable = property(inheritable)
    def private(self):
        ''' Allows setting private metadata for use in
            processing (not sent to PDF file).
            See note on inheritable
        '''
        return _Private(self)
    private = property(private)
class IndirectPdfDict(PdfDict):
    ''' IndirectPdfDict is a convenience class.  You could
        create a direct PdfDict and then set indirect = True on it,
        or you could just create an IndirectPdfDict.
    '''
    # Class attribute shadows PdfDict.indirect (False).
    indirect = True
| {
"repo_name": "zhzhzoo/pdfrw",
"path": "pdfrw/objects/pdfdict.py",
"copies": "1",
"size": "7771",
"license": "mit",
"hash": 6856068111144022000,
"line_mean": 33.3849557522,
"line_max": 83,
"alpha_frac": 0.582035774,
"autogenerated": false,
"ratio": 4.4481969089868345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5530232682986834,
"avg_score": null,
"num_lines": null
} |
import re
class PdfString(str):
    ''' A PdfString is an encoded string.  It has a decode
        method to get the actual string data out, and there
        is an encode class method to create such a string.
        Like any PDF object, it could be indirect, but it
        defaults to being a direct object.

        Two encodings are handled: literal strings "(...)" with
        backslash escapes, and hex strings "<...>".
    '''
    indirect = False
    unescape_dict = {'\\b':'\b', '\\f':'\f', '\\n':'\n',
                     '\\r':'\r', '\\t':'\t',
                     '\\\r\n': '', '\\\r':'', '\\\n':'',
                     '\\\\':'\\', '\\':'',
                    }
    unescape_pattern = r'(\\\\|\\b|\\f|\\n|\\r|\\t|\\\r\n|\\\r|\\\n|\\[0-9]+|\\)'
    unescape_func = re.compile(unescape_pattern).split
    hex_pattern = '([a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])'
    hex_func = re.compile(hex_pattern).split
    hex_pattern2 = '([a-fA-F0-9][a-fA-F0-9][a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])'
    hex_func2 = re.compile(hex_pattern2).split
    # Indexed by the twobytes flag: 1- or 2-byte hex code groups.
    hex_funcs = hex_func, hex_func2
    def decode_regular(self, remap=chr):
        """Decode a literal '(...)' string, applying backslash escapes."""
        assert self[0] == '(' and self[-1] == ')'
        result = []
        unescape = self.unescape_dict.get
        for chunk in self.unescape_func(self[1:-1]):
            chunk = unescape(chunk, chunk)
            if len(chunk) > 1 and chunk.startswith('\\'):
                # Octal escape, e.g. \101 -> 'A'.
                value = int(chunk[1:], 8)
                # FIXME: TODO: Handle unicode here
                if value > 127:
                    value = 127
                chunk = remap(value)
            if chunk:
                result.append(chunk)
        return ''.join(result)
    def decode_hex(self, remap=chr, twobytes=False):
        """Decode a hex '<...>' string; embedded whitespace is ignored."""
        stripped = ''.join(self.split())
        pieces = self.hex_funcs[twobytes](stripped)
        # re.split with a capture group alternates delimiters and digits.
        digits = pieces[1::2]
        delims = pieces[0::2]
        assert delims[0] == '<' and delims[-1] == '>' and ''.join(delims) == '<>', self
        return ''.join([remap(int(code, 16)) for code in digits])
    def decode(self, remap=chr, twobytes=False):
        """Dispatch to the literal or hex decoder based on the first char."""
        if not self.startswith('('):
            return self.decode_hex(remap, twobytes)
        return self.decode_regular(remap)
    def encode(cls, source, usehex=False):
        """Build a literal PdfString from a plain string (hex unsupported)."""
        assert not usehex, "Not supported yet"
        if isinstance(source, unicode):
            source = source.encode('utf-8')
        else:
            source = str(source)
        # Backslash must be escaped first, then the parentheses.
        for old, new in (('\\', '\\\\'), ('(', '\\('), (')', '\\)')):
            source = source.replace(old, new)
        return cls('(' + source + ')')
    encode = classmethod(encode)
| {
"repo_name": "ralsina/pdfrw",
"path": "pdfrw/objects/pdfstring.py",
"copies": "4",
"size": "2710",
"license": "mit",
"hash": -8005200650202152000,
"line_mean": 36.1232876712,
"line_max": 102,
"alpha_frac": 0.5121771218,
"autogenerated": false,
"ratio": 3.4260429835651074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5938220105365107,
"avg_score": null,
"num_lines": null
} |
import re
class PdfString(str):
    ''' A PdfString is an encoded string.  It has a decode
        method to get the actual string data out, and there
        is an encode class method to create such a string.
        Like any PDF object, it could be indirect, but it
        defaults to being a direct object.

        Two encodings are handled: literal strings "(...)" with
        backslash escapes, and hex strings "<...>".
    '''
    indirect = False
    # Simple escape sequences; a lone trailing backslash maps to ''.
    unescape_dict = {'\\b': '\b', '\\f': '\f', '\\n': '\n',
                     '\\r': '\r', '\\t': '\t',
                     '\\\r\n': '', '\\\r': '', '\\\n': '',
                     '\\\\': '\\', '\\': '',
                     }
    unescape_pattern = \
        r'(\\\\|\\b|\\f|\\n|\\r|\\t|\\\r\n|\\\r|\\\n|\\[0-9]+|\\)'
    unescape_func = re.compile(unescape_pattern).split
    # Single-byte hex codes (pairs, with a lone trailing digit allowed).
    hex_pattern = '([a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])'
    hex_func = re.compile(hex_pattern).split
    # Two-byte hex codes (groups of four).
    hex_pattern2 = ('([a-fA-F0-9][a-fA-F0-9][a-fA-F0-9][a-fA-F0-9]|'
                    '[a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])')
    hex_func2 = re.compile(hex_pattern2).split
    # Indexed by the twobytes flag.
    hex_funcs = hex_func, hex_func2
    def decode_regular(self, remap=chr):
        # Decode a literal '(...)' string, applying backslash escapes.
        assert self[0] == '(' and self[-1] == ')'
        mylist = self.unescape_func(self[1:-1])
        result = []
        unescape = self.unescape_dict.get
        for chunk in mylist:
            chunk = unescape(chunk, chunk)
            if chunk.startswith('\\') and len(chunk) > 1:
                # Octal escape, e.g. \101 -> 'A'.
                value = int(chunk[1:], 8)
                # FIXME: TODO: Handle unicode here
                if value > 127:
                    value = 127
                chunk = remap(value)
            if chunk:
                result.append(chunk)
        return ''.join(result)
    def decode_hex(self, remap=chr, twobytes=False):
        # Decode a hex '<...>' string; embedded whitespace is ignored.
        data = ''.join(self.split())
        data = self.hex_funcs[twobytes](data)
        # re.split with a capture group alternates delimiters and digits.
        chars = data[1::2]
        other = data[0::2]
        assert other[0] == '<' and other[-1] == '>' and \
            ''.join(other) == '<>', self
        return ''.join([remap(int(x, 16)) for x in chars])
    def decode(self, remap=chr, twobytes=False):
        # Dispatch on the first character: '(' literal, otherwise hex.
        if self.startswith('('):
            return self.decode_regular(remap)
        else:
            return self.decode_hex(remap, twobytes)
    def encode(cls, source, usehex=False):
        # Build a literal PdfString from a plain (or py2 unicode) string.
        assert not usehex, "Not supported yet"
        if isinstance(source, unicode):
            source = source.encode('utf-8')
        else:
            source = str(source)
        # Backslash must be escaped first, then the parentheses.
        source = source.replace('\\', '\\\\')
        source = source.replace('(', '\\(')
        source = source.replace(')', '\\)')
        return cls('(' + source + ')')
    encode = classmethod(encode)
| {
"repo_name": "zhzhzoo/pdfrw",
"path": "pdfrw/objects/pdfstring.py",
"copies": "1",
"size": "2771",
"license": "mit",
"hash": -1354004805640416000,
"line_mean": 34.987012987,
"line_max": 68,
"alpha_frac": 0.5009022014,
"autogenerated": false,
"ratio": 3.476787954830615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4477690156230615,
"avg_score": null,
"num_lines": null
} |
'''
A tokenizer for PDF streams.
In general, documentation used was "PDF reference",
sixth edition, for PDF version 1.7, dated November 2006.
'''
from __future__ import generators
import re
import itertools
from pdfrw.objects import PdfString, PdfObject
from pdfrw.errors import log, PdfParseError
def linepos(fdata, loc):
    """Translate a character offset into a 1-based (line, column) pair.

    Counts LF and CR line endings, treating CRLF as a single break.
    """
    newlines = fdata.count('\n', 0, loc)
    creturns = fdata.count('\r', 0, loc)
    crlfs = fdata.count('\r\n', 0, loc)
    line = 1 + newlines + creturns - crlfs
    # Column is measured from the last line break before loc (or -1).
    last_break = max(fdata.rfind('\n', 0, loc), fdata.rfind('\r', 0, loc))
    return line, loc - last_break
class PdfTokens(object):
# Table 3.1, page 50 of reference, defines whitespace
eol = '\n\r'
whitespace = '\x00 \t\f' + eol
# Text on page 50 defines delimiter characters
# Escape the ]
delimiters = r'()<>{}[\]/%'
# "normal" stuff is all but delimiters or whitespace.
p_normal = r'(?:[^\\%s%s]+|\\[^%s])+' % (whitespace,
delimiters, whitespace)
p_comment = r'\%%[^%s]*' % eol
# This will get the bulk of literal strings.
p_literal_string = r'\((?:[^\\()]+|\\.)*[()]?'
# This will get more pieces of literal strings
# (Don't ask me why, but it hangs without the trailing ?.)
p_literal_string_extend = r'(?:[^\\()]+|\\.)*[()]?'
# A hex string. This one's easy.
p_hex_string = r'\<[%s0-9A-Fa-f]*\>' % whitespace
p_dictdelim = r'\<\<|\>\>'
p_name = r'/[^%s%s]*' % (delimiters, whitespace)
p_catchall = '[^%s]' % whitespace
pattern = '|'.join([p_normal, p_name, p_hex_string, p_dictdelim,
p_literal_string, p_comment, p_catchall])
findtok = re.compile('(%s)[%s]*' %
(pattern, whitespace), re.DOTALL).finditer
findparen = re.compile('(%s)[%s]*' %
(p_literal_string_extend, whitespace), re.DOTALL).finditer
splitname = re.compile(r'\#([0-9A-Fa-f]{2})').split
    def _cacheobj(cache, obj, constructor):
        ''' This caching relies on the constructors
            returning something that will compare as
            equal to the original obj.  This works
            fine with our PDF objects.
        '''
        # NOTE: no self -- this is bound into other methods via default
        # arguments (see fixname / _gettoks) and called as a plain function.
        result = cache.get(obj)
        if result is None:
            result = constructor(obj)
            cache[result] = result
        return result
    def fixname(self, cache, token, constructor, splitname=splitname,
                join=''.join, cacheobj=_cacheobj):
        ''' Inside name tokens, a '#' character indicates that
            the next two bytes are hex characters to be used
            to form the 'real' character.
        '''
        # splitname alternates literal pieces (even indices) with the
        # captured two-digit hex codes (odd indices).
        substrs = splitname(token)
        if '#' in join(substrs[::2]):
            # A '#' not followed by two hex digits: leave token untouched.
            self.warning('Invalid /Name token')
            return token
        substrs[1::2] = (chr(int(x, 16)) for x in substrs[1::2])
        result = cacheobj(cache, join(substrs), constructor)
        # Keep the original escaped spelling for round-tripping.
        result.encoded = token
        return result
def _gettoks(self, startloc, cacheobj=_cacheobj, delimiters=delimiters,
findtok=findtok, findparen=findparen, PdfString=PdfString,
PdfObject=PdfObject):
''' Given a source data string and a location inside it,
gettoks generates tokens. Each token is a tuple of the form:
<starting file loc>, <ending file loc>, <token string>
The ending file loc is past any trailing whitespace.
The main complication here is the literal strings, which
can contain nested parentheses. In order to cope with these
we can discard the current iterator and loop back to the
top to get a fresh one.
We could use re.search instead of re.finditer, but that's slower.
'''
fdata = self.fdata
current = self.current = [(startloc, startloc)]
namehandler = (cacheobj, self.fixname)
cache = {}
while 1:
for match in findtok(fdata, current[0][1]):
current[0] = tokspan = match.span()
token = match.group(1)
firstch = token[0]
if firstch not in delimiters:
token = cacheobj(cache, token, PdfObject)
elif firstch in '/<(%':
if firstch == '/':
# PDF Name
token = namehandler['#' in token](cache, token,
PdfObject)
elif firstch == '<':
# << dict delim, or < hex string >
if token[1:2] != '<':
token = cacheobj(cache, token, PdfString)
elif firstch == '(':
# Literal string
# It's probably simple, but maybe not
# Nested parentheses are a bear, and if
# they are present, we exit the for loop
# and get back in with a new starting location.
ends = None # For broken strings
if fdata[match.end(1) - 1] != ')':
nest = 2
m_start, loc = tokspan
for match in findparen(fdata, loc):
loc = match.end(1)
ending = fdata[loc - 1] == ')'
nest += 1 - ending * 2
if not nest:
break
if ending and ends is None:
ends = loc, match.end(), nest
token = fdata[m_start:loc]
current[0] = m_start, match.end()
if nest:
# There is one possible recoverable error seen in
# the wild -- some stupid generators don't escape (.
# If this happens, just terminate on first unescaped ).
# The string won't be quite right, but that's a science
# fair project for another time.
(self.error, self.exception)[not ends]('Unterminated literal string')
loc, ends, nest = ends
token = fdata[m_start:loc] + ')' * nest
current[0] = m_start, ends
token = cacheobj(cache, token, PdfString)
elif firstch == '%':
# Comment
if self.strip_comments:
continue
else:
self.exception(('Tokenizer logic incorrect --'
' should never get here'))
yield token
if current[0] is not tokspan:
break
else:
if self.strip_comments:
break
raise StopIteration
def __init__(self, fdata, startloc=0, strip_comments=True):
self.fdata = fdata
self.strip_comments = strip_comments
self.iterator = iterator = self._gettoks(startloc)
self.next = iterator.next
def setstart(self, startloc):
''' Change the starting location.
'''
current = self.current
if startloc != current[0][1]:
current[0] = startloc, startloc
def floc(self):
''' Return the current file position
(where the next token will be retrieved)
'''
return self.current[0][1]
floc = property(floc, setstart)
def tokstart(self):
''' Return the file position of the most
recently retrieved token.
'''
return self.current[0][0]
tokstart = property(tokstart, setstart)
def __iter__(self):
return self.iterator
def multiple(self, count, islice=itertools.islice, list=list):
''' Retrieve multiple tokens
'''
return list(islice(self, count))
def next_default(self, default='nope'):
for result in self:
return result
return default
def msg(self, msg, *arg):
if arg:
msg %= arg
fdata = self.fdata
begin, end = self.current[0]
line, col = linepos(fdata, begin)
if end > begin:
tok = fdata[begin:end].rstrip()
if len(tok) > 30:
tok = tok[:26] + ' ...'
return '%s (line=%d, col=%d, token=%s)' % \
(msg, line, col, repr(tok))
return '%s (line=%d, col=%d)' % (msg, line, col)
def warning(self, *arg):
log.warning(self.msg(*arg))
def error(self, *arg):
log.error(self.msg(*arg))
def exception(self, *arg):
raise PdfParseError(self.msg(*arg))
| {
"repo_name": "nrcmedia/pdfrw",
"path": "pdfrw/tokens.py",
"copies": "1",
"size": "9043",
"license": "mit",
"hash": 61706952555355980,
"line_mean": 36.9957983193,
"line_max": 101,
"alpha_frac": 0.5011611191,
"autogenerated": false,
"ratio": 4.295961995249407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5297123114349407,
"avg_score": null,
"num_lines": null
} |
'''
This module contains code to build PDF "Form XObjects".
A Form XObject allows a fragment from one PDF file to be cleanly
included in another PDF file.
Reference for syntax: "Parameters for opening PDF files" from SDK 8.1
http://www.adobe.com/devnet/acrobat/pdfs/pdf_open_parameters.pdf
supported 'page=xxx', 'viewrect=<left>,<top>,<width>,<height>'
Also supported by this, but not by Adobe:
'rotate=xxx' where xxx in [0, 90, 180, 270]
Units are in points
Reference for content: Adobe PDF reference, sixth edition, version 1.7
http://www.adobe.com/devnet/acrobat/pdfs/pdf_reference_1-7.pdf
Form xobjects discussed chapter 4.9, page 355
'''
from pdfrw.objects import PdfDict, PdfArray, PdfName
from pdfrw.pdfreader import PdfReader
from pdfrw.errors import log
from pdfrw.uncompress import uncompress
class ViewInfo(object):
    ''' Instantiate ViewInfo with a uri, and it will parse out
        the filename, page, and viewrect into object attributes.

        The uri follows Adobe's "open parameters" syntax, e.g.
        'name.pdf#page=3&viewrect=10,20,30,40'.  Unknown options are
        logged as errors.  Extra keyword arguments may set any
        attribute already declared on the class.
    '''
    doc = None
    docname = None
    page = None
    viewrect = None
    rotate = None

    def __init__(self, pageinfo='', **kw):
        pageinfo = pageinfo.split('#', 1)
        if len(pageinfo) == 2:
            # Options may be separated by either '#' or '&'.
            pageinfo[1:] = pageinfo[1].replace('&', '#').split('#')
        for key in 'page viewrect'.split():
            if pageinfo[0].startswith(key + '='):
                break
        else:
            # First segment is not an option, so it is the file name.
            self.docname = pageinfo.pop(0)
        for item in pageinfo:
            key, value = item.split('=')
            key = key.strip()
            value = value.replace(',', ' ').split()
            if key in ('page', 'rotate'):
                assert len(value) == 1
                setattr(self, key, int(value[0]))
            elif key == 'viewrect':
                assert len(value) == 4
                setattr(self, key, [float(x) for x in value])
            else:
                log.error('Unknown option: %s', key)
        # Fix: use items() instead of the Python-2-only iteritems() so
        # this class works unchanged on both Python 2 and Python 3.
        for key, value in kw.items():
            assert hasattr(self, key), key
            setattr(self, key, value)
def get_rotation(rotate):
    ''' Return clockwise rotation code:
            0 = unrotated
            1 = 90 degrees
            2 = 180 degrees
            3 = 270 degrees
        Non-numeric input or a rotation that is not a multiple of
        90 degrees maps to 0.
    '''
    try:
        rotate = int(rotate)
    except (ValueError, TypeError):
        return 0
    if rotate % 90 != 0:
        return 0
    # Fix: use floor division so the result stays an int on Python 3
    # ('/' would return a float, breaking callers' `rotation & 1`
    # bitwise tests); '//' is identical for ints on Python 2.
    return rotate // 90
def rotate_point(point, rotation):
    ''' Rotate an (x, y) coordinate clockwise by a rotation code
        specifying a multiple of 90 degrees.

        Bit 0 of the code applies a 90-degree turn, bit 1 a
        180-degree turn; together they cover 0/90/180/270.
    '''
    x, y = point
    if rotation & 1:
        x, y = y, -x
    if rotation & 2:
        x, y = -x, -y
    return x, y
def rotate_rect(rect, rotation):
    ''' Rotate both corner points of the rectangle, then normalize
        the result so the new lower-left precedes the new upper-right.
    '''
    x0, y0 = rotate_point(rect[:2], rotation)
    x1, y1 = rotate_point(rect[2:], rotation)
    return (min(x0, x1), min(y0, y1),
            max(x0, x1), max(y0, y1))
def getrects(inheritable, pageinfo, rotation):
    ''' Given the inheritable attributes of a page and
        the desired pageinfo rectangle, return the page's
        media box and the calculated boundary (clip) box.
    '''
    mbox = tuple([float(x) for x in inheritable.MediaBox])
    vrect = pageinfo.viewrect
    if vrect is None:
        # No view requested: clip to the crop box (or the whole media
        # box when no crop box is present).
        cbox = tuple([float(x) for x in (inheritable.CropBox or mbox)])
    else:
        # Rotate the media box to match what the user sees,
        # figure out the clipping box, then rotate back
        mleft, mbot, mright, mtop = rotate_rect(mbox, rotation)
        x, y, w, h = vrect
        # viewrect origin is the top-left corner of the rotated page,
        # with y growing downward -- hence mtop - y.
        cleft = mleft + x
        ctop = mtop - y
        cright = cleft + w
        cbot = ctop - h
        # Intersect the requested view with the media box.
        cbox = (max(mleft, cleft), max(mbot, cbot),
                min(mright, cright), min(mtop, ctop))
        cbox = rotate_rect(cbox, -rotation)
    return mbox, cbox
def _cache_xobj(contents, resources, mbox, bbox, rotation):
    ''' Return a cached Form XObject, or create a new one and cache it.
        Adds private members x, y, w, h
    '''
    # The cache is stored on the contents object itself, so repeated
    # views of the same stream share one XObject per (mbox, bbox,
    # rotation) combination.
    cachedict = contents.xobj_cachedict
    if cachedict is None:
        cachedict = contents.private.xobj_cachedict = {}
    cachekey = mbox, bbox, rotation
    result = cachedict.get(cachekey)
    if result is None:
        # Full-page views copy the stream directly; sub-page views wrap
        # a full-page XObject (see _get_fullpage / _get_subpage).
        func = (_get_fullpage, _get_subpage)[mbox != bbox]
        result = PdfDict(
            func(contents, resources, mbox, bbox, rotation),
            Type=PdfName.XObject,
            Subtype=PdfName.Form,
            FormType=1,
            BBox=PdfArray(bbox),
        )
        rect = bbox
        if rotation:
            # The Matrix rotates the form's coordinate system, so the
            # placement rectangle must be rotated to match.
            matrix = (rotate_point((1, 0), rotation) +
                      rotate_point((0, 1), rotation))
            result.Matrix = PdfArray(matrix + (0, 0))
            rect = rotate_rect(rect, rotation)

        # Private placement footprint for callers laying out the form.
        result.private.x = rect[0]
        result.private.y = rect[1]
        result.private.w = rect[2] - rect[0]
        result.private.h = rect[3] - rect[1]
        cachedict[cachekey] = result
    return result
def _get_fullpage(contents, resources, mbox, bbox, rotation):
    ''' Build the dictionary for a full-page Form XObject.

        fullpage is easy:  just copy the contents and attach the
        resources; _cache_xobj supplies everything else.
    '''
    page_dict = PdfDict(contents, Resources=resources)
    return page_dict
def _get_subpage(contents, resources, mbox, bbox, rotation):
    ''' Build the dictionary for a sub-page Form XObject.

        subpages *could* be as easy as full pages, but we choose to
        create a Form XObject for the whole page first and reference
        it from the sub-page, on the off-chance that several items
        from the same page are wanted.
    '''
    fullpage = _cache_xobj(contents, resources, mbox, mbox, 0)
    xobjects = PdfDict(XObject=PdfDict(FullPage=fullpage))
    return PdfDict(
        stream='/FullPage Do\n',
        Resources=xobjects,
    )
def pagexobj(page, viewinfo=None, allow_compressed=True):
    ''' pagexobj creates and returns a Form XObject for
        a given view within a page (Defaults to entire page.)

        :param page: page object (provides .inheritable and .Contents)
        :param viewinfo: optional ViewInfo describing the sub-view;
            defaults to a view of the entire page.
        :param allow_compressed: when False, insist the content stream
            carries no entries besides /Length (i.e. no filters).
    '''
    # Fix: avoid a shared mutable default argument (the old
    # `viewinfo=ViewInfo()` created one instance at import time).
    if viewinfo is None:
        viewinfo = ViewInfo()
    inheritable = page.inheritable
    resources = inheritable.Resources
    rotation = get_rotation(inheritable.Rotate)
    mbox, bbox = getrects(inheritable, viewinfo, rotation)
    rotation += get_rotation(viewinfo.rotate)
    if isinstance(page.Contents, PdfArray):
        if len(page.Contents) == 1:
            contents = page.Contents[0]
        else:
            # decompress and join multiple streams
            contlist = [c for c in page.Contents]
            uncompress(contlist)
            stream = '\n'.join([c.stream for c in contlist])
            contents = PdfDict(stream=stream)
    else:
        contents = page.Contents

    # Make sure the only attribute is length
    # All the filters must have been executed
    assert int(contents.Length) == len(contents.stream)
    if not allow_compressed:
        # Fix: items() works on Python 2 and 3 (iteritems() is py2-only)
        # and is equivalent for this length check.
        assert len([x for x in contents.items()]) == 1
    return _cache_xobj(contents, resources, mbox, bbox, rotation)
def docxobj(pageinfo, doc=None, allow_compressed=True):
    ''' Create and return an actual Form XObject for the view
        described by *pageinfo* (a ViewInfo or an open-parameters
        uri string).  Can work standalone, or in conjunction with
        the CacheXObj class (below).
    '''
    if not isinstance(pageinfo, ViewInfo):
        pageinfo = ViewInfo(pageinfo)

    # Reconcile an explicitly-passed document with one already attached
    # to the view; only read from disk when neither is present.
    if doc is not None:
        assert pageinfo.doc is None
        pageinfo.doc = doc
    elif pageinfo.doc is not None:
        doc = pageinfo.doc
    else:
        doc = pageinfo.doc = PdfReader(pageinfo.docname,
                                       decompress=not allow_compressed)
    assert isinstance(doc, PdfReader)

    # Page numbers in the uri are 1-based; default to the first page.
    pagenum = (pageinfo.page or 1) - 1
    return pagexobj(doc.pages[pagenum], pageinfo, allow_compressed)
class CacheXObj(object):
    ''' Use to keep from reparsing files over and over,
        and to keep from making the output too much
        bigger than it ought to be by replicating
        unnecessary object copies.
    '''

    def __init__(self, decompress=False):
        ''' Set decompress true if you need
            the Form XObjects to be decompressed.
            Will decompress what it can and scream
            about the rest.
        '''
        self.cached_pdfs = {}
        self.decompress = decompress

    def load(self, sourcename):
        ''' Load a Form XObject from a uri
        '''
        view = ViewInfo(sourcename)
        cache = self.cached_pdfs
        reader = cache.get(view.docname)
        if reader is None:
            # First time we see this file: parse it and remember it.
            reader = PdfReader(view.docname, decompress=self.decompress)
            cache[view.docname] = reader
        return docxobj(view, reader, allow_compressed=not self.decompress)
| {
"repo_name": "zhzhzoo/pdfrw",
"path": "pdfrw/buildxobj.py",
"copies": "1",
"size": "9067",
"license": "mit",
"hash": 4812321596284411000,
"line_mean": 32.5814814815,
"line_max": 78,
"alpha_frac": 0.6030660637,
"autogenerated": false,
"ratio": 3.827353313634445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9930419377334445,
"avg_score": 0,
"num_lines": 270
} |
# A patched version of QMessageBox that allows copying the error
from __future__ import absolute_import, division, print_function
import os
from qtpy import QtWidgets, QtGui
__all__ = ['QMessageBoxPatched']
class QMessageBoxPatched(QtWidgets.QMessageBox):
    """QMessageBox with an Edit menu so the detailed error text can be
    selected and copied to the clipboard."""

    def __init__(self, *args, **kwargs):
        super(QMessageBoxPatched, self).__init__(*args, **kwargs)

        act_copy = QtWidgets.QAction('&Copy', self)
        act_copy.setShortcut(QtGui.QKeySequence.Copy)
        act_copy.triggered.connect(self.copy_detailed)

        act_select_all = QtWidgets.QAction('Select &All', self)
        act_select_all.setShortcut(QtGui.QKeySequence.SelectAll)
        act_select_all.triggered.connect(self.select_all)

        bar = QtWidgets.QMenuBar()
        edit_menu = bar.addMenu('&Edit')
        for action in (act_copy, act_select_all):
            edit_menu.addAction(action)

        self.layout().setMenuBar(bar)

    @property
    def detailed_text_widget(self):
        # The detailed-text area is the dialog's only QTextEdit child.
        return self.findChild(QtWidgets.QTextEdit)

    def select_all(self):
        """Select all text in the detailed-text widget."""
        self.detailed_text_widget.selectAll()

    def copy_detailed(self):
        """Copy the selected detailed text to the system clipboard."""
        clipboard = QtWidgets.QApplication.clipboard()
        selected_text = self.detailed_text_widget.textCursor().selectedText()
        # Newlines are unicode, so need to normalize them to ASCII
        selected_text = os.linesep.join(selected_text.splitlines())
        clipboard.setText(selected_text)
| {
"repo_name": "saimn/glue",
"path": "glue/utils/qt/qmessagebox_widget.py",
"copies": "1",
"size": "1438",
"license": "bsd-3-clause",
"hash": 7266451257069533000,
"line_mean": 30.9555555556,
"line_max": 77,
"alpha_frac": 0.6870653686,
"autogenerated": false,
"ratio": 3.961432506887052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 45
} |
# A patched version of QMessageBox that allows copying the error
import os
from ...external.qt import QtGui
__all__ = ['QMessageBoxPatched']
class QMessageBoxPatched(QtGui.QMessageBox):
    """QMessageBox subclass with an Edit menu so the detailed error
    text can be selected and copied to the clipboard."""

    def __init__(self, *args, **kwargs):
        super(QMessageBoxPatched, self).__init__(*args, **kwargs)

        # Menu actions wired to the copy/select-all helpers below.
        copy_action = QtGui.QAction('&Copy', self)
        copy_action.setShortcut(QtGui.QKeySequence.Copy)
        copy_action.triggered.connect(self.copy_detailed)

        select_all = QtGui.QAction('Select &All', self)
        select_all.setShortcut(QtGui.QKeySequence.SelectAll)
        select_all.triggered.connect(self.select_all)

        menubar = QtGui.QMenuBar()
        editMenu = menubar.addMenu('&Edit')
        editMenu.addAction(copy_action)
        editMenu.addAction(select_all)

        self.layout().setMenuBar(menubar)

    @property
    def detailed_text_widget(self):
        # The detailed-text area is the dialog's only QTextEdit child.
        return self.findChild(QtGui.QTextEdit)

    def select_all(self):
        """Select all text in the detailed-text widget."""
        self.detailed_text_widget.selectAll()

    def copy_detailed(self):
        """Copy the selected detailed text to the system clipboard."""
        clipboard = QtGui.QApplication.clipboard()
        selected_text = self.detailed_text_widget.textCursor().selectedText()
        # Newlines are unicode, so need to normalize them to ASCII
        selected_text = os.linesep.join(selected_text.splitlines())
        clipboard.setText(selected_text)
| {
"repo_name": "JudoWill/glue",
"path": "glue/utils/qt/qmessagebox_widget.py",
"copies": "1",
"size": "1346",
"license": "bsd-3-clause",
"hash": -909899314851793200,
"line_mean": 31.0476190476,
"line_max": 77,
"alpha_frac": 0.676077266,
"autogenerated": false,
"ratio": 3.8901734104046244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 42
} |
"""A patch to the datetime module."""
from .base_patcher import BasePatcher
import sys
import datetime
try:
import copy_reg as copyreg
except ImportError:
import copyreg
# Keep references to the genuine classes before they are patched out,
# so the fakes can delegate to the real implementations.
_real_datetime = datetime.datetime
_real_date = datetime.date
def with_metaclass(meta, name, *bases):
    """Create a base class with a metaclass.

    Equivalent to ``class name(*bases, metaclass=meta)``, written as a
    call so it works identically on Python 2 and Python 3.
    """
    namespace = {}
    return meta(name, bases, namespace)
class DateSubclassMeta(type):
    """Metaclass that delegates isinstance() checks to the real date
    class, so real date objects pass isinstance(x, FakeDate)."""

    @classmethod
    def __instancecheck__(mcs, obj):
        return isinstance(obj, _real_date)
class DatetimeSubclassMeta(DateSubclassMeta):
    """Metaclass that delegates isinstance() checks to the real
    datetime class, so real datetimes pass isinstance(x, FakeDatetime)."""

    @classmethod
    def __instancecheck__(mcs, obj):
        return isinstance(obj, _real_datetime)
def date_to_fakedate(date):
    """Return a FakeDate mirroring the given real date object."""
    return FakeDate(date.year, date.month, date.day)
def datetime_to_fakedatetime(datetime):
    """Return a FakeDatetime mirroring the given real datetime object."""
    fields = (datetime.year, datetime.month, datetime.day,
              datetime.hour, datetime.minute, datetime.second,
              datetime.microsecond, datetime.tzinfo)
    return FakeDatetime(*fields)
class FakeDate(with_metaclass(DateSubclassMeta, 'date', _real_date)):
    """Mocked datetime.date class."""

    def __new__(cls, *args, **kwargs):
        """Return a new mocked date object."""
        return _real_date.__new__(cls, *args, **kwargs)

    @classmethod
    def today(cls):
        """Return today's date."""
        # _now is injected by DatetimePatcher.__init__ and reads the
        # patcher's virtual clock instead of the wall clock.
        result = cls._now()
        return date_to_fakedate(result)
# Mirror the real class's min/max sentinels on the fake class.
FakeDate.min = date_to_fakedate(_real_date.min)
FakeDate.max = date_to_fakedate(_real_date.max)
class FakeDatetime(with_metaclass(DatetimeSubclassMeta, 'datetime',
                                  _real_datetime, FakeDate)):
    """Mocked datetime.datetime class."""

    def __new__(cls, *args, **kwargs):
        """Return a new mocked datetime object."""
        return _real_datetime.__new__(cls, *args, **kwargs)

    @classmethod
    def now(cls, tz=None):
        """Return a datetime object representing current time."""
        # _now is injected by DatetimePatcher.__init__ (virtual clock).
        now = cls._now()
        if tz:
            # NOTE(review): _tz_offset is presumably injected alongside
            # _now by the patcher -- confirm; it is not set in this file.
            result = tz.fromutc(now.replace(tzinfo=tz)) +\
                datetime.timedelta(hours=cls._tz_offset())
        else:
            result = now
        return datetime_to_fakedatetime(result)

    @classmethod
    def today(cls):
        """Return a datetime object representing current time."""
        return cls.now(tz=None)

    @classmethod
    def utcnow(cls):
        """Return a datetime object representing current time."""
        result = cls._now()
        return datetime_to_fakedatetime(result)
# Mirror the real class's min/max sentinels on the fake class.
FakeDatetime.min = datetime_to_fakedatetime(_real_datetime.min)
FakeDatetime.max = datetime_to_fakedatetime(_real_datetime.max)
def pickle_fake_date(datetime_):
    """Pickle function for FakeDate.

    Returns the (constructor, args) pair copyreg expects.
    """
    fields = (datetime_.year, datetime_.month, datetime_.day)
    return FakeDate, fields
def pickle_fake_datetime(datetime_):
    """Pickle function for FakeDatetime.

    Returns the (constructor, args) pair copyreg expects.
    """
    fields = (datetime_.year, datetime_.month, datetime_.day,
              datetime_.hour, datetime_.minute, datetime_.second,
              datetime_.microsecond, datetime_.tzinfo)
    return FakeDatetime, fields
class DatetimePatcher(BasePatcher):
    """Patcher of the datetime module.

    patching:
        - datetime.today
        - datetime.now
        - datetime.utcnow
        - date.today
    """

    def __init__(self, **kwargs):
        """Create the patcher."""
        super(DatetimePatcher, self).__init__(patcher_module=__name__,
                                              **kwargs)
        # The fake classes read the machine-controlled clock via _now.
        FakeDate._now = self._now
        FakeDatetime._now = self._now

    def get_patched_module(self):
        """Return the actual module object to be patched."""
        return datetime

    def get_patch_actions(self):
        """Return list of the patches to do."""
        actions = [
            ('date', _real_date, FakeDate),
            ('datetime', _real_datetime, FakeDatetime),
        ]
        return actions

    def start(self):
        """Change pickle function for datetime to handle mocked datetime."""
        super(DatetimePatcher, self).start()
        dispatch = copyreg.dispatch_table
        dispatch[_real_datetime] = pickle_fake_datetime
        dispatch[_real_date] = pickle_fake_date

    def stop(self):
        """Return pickle behavior to normal."""
        dispatch = copyreg.dispatch_table
        dispatch.pop(_real_datetime)
        dispatch.pop(_real_date)
        super(DatetimePatcher, self).stop()

    def _now(self):
        # Current virtual time as a real datetime.
        return _real_datetime.fromtimestamp(self.clock.time)
| {
"repo_name": "snudler6/time-travel",
"path": "src/time_travel/patchers/datetime_patcher.py",
"copies": "1",
"size": "4881",
"license": "mit",
"hash": 6845699356640046000,
"line_mean": 26.4213483146,
"line_max": 76,
"alpha_frac": 0.5972136857,
"autogenerated": false,
"ratio": 4.248041775456919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 178
} |
"""A patch to the select.select function."""
from .base_patcher import BasePatcher
import select as select_lib
from enum import Enum
class SelectPatcher(BasePatcher):
    """Patcher for select.select.

    Replaces the real select.select with a version driven by the
    virtual clock and the shared event pool, so tests can schedule
    fd readiness at chosen timestamps.
    """

    EVENTS_NAMESPACE = 'select'
    EventTypes = Enum('select', ['READ', 'WRITE', 'EXCEPTIONAL'])

    def __init__(self, *args, **kwargs):
        """Create the patch."""
        super(SelectPatcher, self).__init__(*args, **kwargs)

    def get_patch_actions(self):
        """Return generator containing all patches to do."""
        return [('select', select_lib.select, self._mocked_select)]

    def get_patched_module(self):
        """Return the actual module object to be patched."""
        return select_lib

    @classmethod
    def get_events_namespace(cls):
        """Return the namespace of the select events."""
        return cls.EVENTS_NAMESPACE

    @classmethod
    def get_event_types(cls):
        """Return Enum of select events types."""
        return cls.EventTypes

    @staticmethod
    def _list_intersection(list1, list2):
        """Return a list of the items common to both input lists."""
        return list(set(list1).intersection(set(list2)))

    def _get_earliest_events(self, waited_fds, event, timeout):
        """Return (timestamp, fds) for the earliest matching event.

        Falls back to (deadline, []) when no matching event occurs
        before the timeout expires; the deadline is infinite when
        timeout is None.
        """
        added_timeout = float('inf') if timeout is None else timeout
        timeout_timestamp = self.clock.time + added_timeout

        def _is_relevant_fd_event(fd, evt):
            return fd in waited_fds and evt == event

        # fd_events is a list of [(fd, set(events)), ...].
        ts, fd_events = self.event_pool.get_next_event(
            _is_relevant_fd_event)

        if ts is None or ts > timeout_timestamp:
            return timeout_timestamp, []
        else:
            return ts, [fd for fd, _ in fd_events]

    def _mocked_select(self, rlist, wlist, xlist, timeout=None):
        """Mocked select.select.

        Advances the virtual clock to the earliest scheduled event (or
        the timeout), consumes the fired events, and returns the ready
        (read, write, exceptional) fd lists.

        Raises ValueError when timeout is None and no relevant event
        is scheduled (real select would block forever).
        """
        read_timestamp, read_fds = self._get_earliest_events(
            rlist,
            self.EventTypes.READ,
            timeout)
        write_timestamp, write_fds = self._get_earliest_events(
            wlist,
            self.EventTypes.WRITE,
            timeout)
        ex_timestamp, ex_fds = self._get_earliest_events(
            xlist,
            self.EventTypes.EXCEPTIONAL,
            timeout)

        timestamp = min([read_timestamp,
                         write_timestamp,
                         ex_timestamp])

        if timestamp == float('inf'):
            # Fixed typo in the error message ("timout" -> "timeout").
            raise ValueError('No relevant future events were set for '
                             'infinite timeout')

        # Only the classes whose earliest event coincides with the
        # chosen timestamp actually fire; later ones stay pending.
        read_fds = [] if timestamp < read_timestamp else read_fds
        write_fds = [] if timestamp < write_timestamp else write_fds
        ex_fds = [] if timestamp < ex_timestamp else ex_fds

        self.event_pool.remove_events_from_fds(
            timestamp,
            [(fd, self.EventTypes.READ) for fd in read_fds])
        self.event_pool.remove_events_from_fds(
            timestamp,
            [(fd, self.EventTypes.WRITE) for fd in write_fds])
        self.event_pool.remove_events_from_fds(
            timestamp,
            [(fd, self.EventTypes.EXCEPTIONAL) for fd in ex_fds])

        self.clock.time = timestamp

        return read_fds, write_fds, ex_fds
| {
"repo_name": "snudler6/time-travel",
"path": "src/time_travel/patchers/select_patcher.py",
"copies": "1",
"size": "3161",
"license": "mit",
"hash": 2262469886567089200,
"line_mean": 31.9270833333,
"line_max": 79,
"alpha_frac": 0.5890540968,
"autogenerated": false,
"ratio": 4.0629820051413885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5152036101941389,
"avg_score": null,
"num_lines": null
} |
# A * Pathfinding Algorithm 🌟
# Implemented with pygame, this script will find the shortest distance between two nodes using A * Algorithm 🎮
# Instructions / Keys Functionalities:
# -Left Click to add start and end nodes
# -Right Click to remove the nodes
# -Space Bar to start finding the shortest distance
# -'C' to clear and reset the grid
# Requirements:
# pip install pygame
# By Susnata Goswami(https://github.com/proghead00)
import pygame
import math
from queue import PriorityQueue
# Window size in pixels; the display is square.
WIDTH = 800
WIN = pygame.display.set_mode((WIDTH, WIDTH))  # dimension to make it a square
pygame.display.set_caption("A* Path Finding Algorithm")

# RGB palette; each color encodes one grid-cell state (see Spot).
RED = (235, 77, 75)
GREEN = (186, 220, 88)
BLUE = (48, 51, 107)
YELLOW = (249, 202, 36)
WHITE = (255, 255, 255)
BLACK = (53, 59, 72)
PURPLE = (130, 88, 159)
ORANGE = (225, 95, 65)
GREY = (128, 128, 128)
TURQUOISE = (10, 189, 227)
class Spot:
    """One cell of the search grid.

    The cell's color encodes its search state: white = unvisited,
    red = closed, green = open, black = barrier, orange = start,
    turquoise = end, purple = final path.
    """

    def __init__(self, row, col, width, total_rows):
        # Grid coordinates, plus the pixel position derived from them.
        self.row = row
        self.col = col
        self.x = row * width
        self.y = col * width
        self.color = WHITE
        self.neighbors = []
        self.width = width
        self.total_rows = total_rows

    def get_pos(self):
        """Return this cell's (row, col) grid coordinates."""
        return self.row, self.col

    def is_closed(self):
        """True when this cell has been fully explored."""
        return self.color == RED

    def is_open(self):
        """True when this cell is in the open set."""
        return self.color == GREEN

    def is_barrier(self):
        """True when this cell is an impassable wall."""
        return self.color == BLACK

    def is_start(self):
        """True when this cell is the start node."""
        return self.color == ORANGE

    def is_end(self):
        """True when this cell is the end node."""
        return self.color == TURQUOISE

    def reset(self):
        """Return the cell to the unvisited state."""
        self.color = WHITE

    def make_start(self):
        self.color = ORANGE

    def make_closed(self):
        self.color = RED

    def make_open(self):
        self.color = GREEN

    def make_barrier(self):
        self.color = BLACK

    def make_end(self):
        self.color = TURQUOISE

    def make_path(self):
        self.color = PURPLE

    def draw(self, win):
        """Paint this cell as a filled square on the window."""
        pygame.draw.rect(
            win, self.color, (self.x, self.y, self.width, self.width))

    def update_neighbors(self, grid):
        """Collect the passable 4-connected neighbors from the grid."""
        self.neighbors = []
        # DOWN
        if self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier():
            self.neighbors.append(grid[self.row + 1][self.col])

        if self.row > 0 and not grid[self.row - 1][self.col].is_barrier():  # UP
            self.neighbors.append(grid[self.row - 1][self.col])

        # RIGHT
        if self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier():
            self.neighbors.append(grid[self.row][self.col + 1])

        if self.col > 0 and not grid[self.row][self.col - 1].is_barrier():  # LEFT
            self.neighbors.append(grid[self.row][self.col - 1])

    def __lt__(self, other):
        # Spots never compare less-than; the PriorityQueue breaks ties
        # on the insertion counter before ever comparing Spots.
        return False
def h(p1, p2):
    """Manhattan-distance heuristic between two (row, col) positions."""
    (x1, y1), (x2, y2) = p1, p2
    return abs(x2 - x1) + abs(y2 - y1)
def reconstruct_path(came_from, current, draw):
    """Walk the came_from chain back from *current*, marking each
    predecessor as part of the path and redrawing after each step."""
    node = current
    while node in came_from:
        node = came_from[node]
        node.make_path()
        draw()
def algorithm(draw, grid, start, end):
    """Run A* from start to end, animating each step via *draw*.

    Returns True when a path is found (and drawn), False otherwise.
    """
    count = 0
    open_set = PriorityQueue()
    # Entries are (f_score, insertion count, spot); the counter breaks
    # ties so equal-f spots pop in insertion order.
    open_set.put((0, count, start))
    came_from = {}
    # keeps track of current shortest distance from start node to this node
    g_score = {spot: float("inf") for row in grid for spot in row}
    g_score[start] = 0
    # keeps track of predicted distance from this node to end node
    f_score = {spot: float("inf") for row in grid for spot in row}
    f_score[start] = h(start.get_pos(), end.get_pos())

    # Mirror of the queue contents for O(1) membership tests.
    open_set_hash = {start}

    while not open_set.empty():
        # Keep the window responsive while the search runs.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()

        current = open_set.get()[2]
        open_set_hash.remove(current)

        if current == end:
            reconstruct_path(came_from, end, draw)
            end.make_end()
            return True

        for neighbor in current.neighbors:
            # Every step between adjacent cells costs 1.
            temp_g_score = g_score[current] + 1

            if temp_g_score < g_score[neighbor]:
                came_from[neighbor] = current
                g_score[neighbor] = temp_g_score
                f_score[neighbor] = temp_g_score + \
                    h(neighbor.get_pos(), end.get_pos())
                if neighbor not in open_set_hash:
                    count += 1
                    open_set.put((f_score[neighbor], count, neighbor))
                    open_set_hash.add(neighbor)
                    neighbor.make_open()

        draw()

        if current != start:
            current.make_closed()

    # Open set exhausted without reaching the end: no path exists.
    return False
def make_grid(rows, width):
    """Build a rows x rows 2D list of fresh Spot cells.

    *width* is the window size in pixels; each cell is width // rows
    pixels square.
    """
    gap = width // rows  # integer division: gap b/w each of these rows
    return [[Spot(i, j, gap, rows) for j in range(rows)]
            for i in range(rows)]
def draw_grid(win, rows, width):
    """Draw the grey grid lines over the window.

    Fix: the original nested the vertical-line loop inside the
    horizontal-line loop, redrawing every vertical line once per row
    (rows * rows draw calls).  Two independent passes produce the
    identical image with O(rows) calls.
    """
    gap = width // rows
    for i in range(rows):
        # horizontal line
        pygame.draw.line(win, GREY, (0, i * gap), (width, i * gap))
    for j in range(rows):
        # vertical line
        pygame.draw.line(win, GREY, (j * gap, 0), (j * gap, width))
def draw(win, grid, rows, width):
    """Repaint the whole frame: cells, then grid lines, then flip."""
    win.fill(WHITE)
    for spot in (cell for row in grid for cell in row):
        spot.draw(win)
    draw_grid(win, rows, width)
    pygame.display.update()
# getting mouse postiion
def get_clicked_pos(pos, rows, width):
    """Map a mouse pixel position to its (row, col) grid cell."""
    gap = width // rows
    y, x = pos
    return y // gap, x // gap
def main(win, width):
    """Event loop: handle mouse/keyboard input and launch the search."""
    ROWS = 50
    grid = make_grid(ROWS, width)

    start = None
    end = None

    run = True
    while run:
        draw(win, grid, ROWS, width)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False

            if pygame.mouse.get_pressed()[0]:  # LEFT MOUSE BUTTON: 0
                pos = pygame.mouse.get_pos()
                # actual spot in 2D list where mouse is clicked
                row, col = get_clicked_pos(pos, ROWS, width)
                spot = grid[row][col]

                # if start and end aren't done
                if not start and spot != end:
                    start = spot
                    start.make_start()

                # to avoid overlapping of start and end node
                elif not end and spot != start:
                    end = spot
                    end.make_end()

                elif spot != end and spot != start:
                    spot.make_barrier()

            elif pygame.mouse.get_pressed()[2]:  # RIGHT MOUSE BUTTON: 2
                pos = pygame.mouse.get_pos()
                row, col = get_clicked_pos(pos, ROWS, width)
                spot = grid[row][col]
                # Clearing a cell also forgets it as start/end.
                spot.reset()
                if spot == start:
                    start = None
                elif spot == end:
                    end = None

            if event.type == pygame.KEYDOWN:
                # Space starts the search once both endpoints exist.
                if event.key == pygame.K_SPACE and start and end:
                    for row in grid:
                        for spot in row:
                            spot.update_neighbors(grid)

                    algorithm(lambda: draw(win, grid, ROWS, width),
                              grid, start, end)

                # 'C' resets the board to an empty grid.
                if event.key == pygame.K_c:
                    start = None
                    end = None
                    grid = make_grid(ROWS, width)

    pygame.quit()
# Script entry point: launch the visualizer on the module-level window.
main(WIN, WIDTH)
| {
"repo_name": "Logan1x/Python-Scripts",
"path": "bin/A-Star-GUI/AStarGUI.py",
"copies": "1",
"size": "7837",
"license": "mit",
"hash": -971223844160202900,
"line_mean": 26.2707581227,
"line_max": 110,
"alpha_frac": 0.517430724,
"autogenerated": false,
"ratio": 3.7450980392156863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47625287632156865,
"avg_score": null,
"num_lines": null
} |
"""APC Power Distribution Unit control object."""
import logging
from datetime import datetime
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.proto import rfc1902
from retrying import retry, RetryError
class APC(object):
"""APC Power Distribution Unit."""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
# This class requires more attributes and public methods to cover the
# functionality of the device.
ERROR_MSG = "SNMP {} of {} on {} failed."
SNMP_VERSION_2_2C = 1
SNMP_PORT = 161
SNMP_TIMEOUT = 1.5 # 1.5 seconds
SNMP_RETRIES = 2
MAX_STOP_DELAY = 15000 # 15 seconds
INTER_RETRY_WAIT = 500 # 0.5 seconds
# APC PowerNet-MIB Base OID
BASE_OID = (1, 3, 6, 1, 4, 1, 318, 1, 1, 26)
# Static readonly data
# PowerNet-MIB::rPDU2IdentName.1
Q_NAME = BASE_OID + (2, 1, 3, 1)
# PowerNet-MIB::rPDU2IdentLocation.1
Q_LOCATION = BASE_OID + (2, 1, 4, 1)
# PowerNet-MIB::rPDU2IdentHardwareRev.1
Q_HARDWARE_REV = BASE_OID + (2, 1, 5, 1)
# PowerNet-MIB::rPDU2IdentFirmwareRev.1
Q_FIRMWARE_REV = BASE_OID + (2, 1, 6, 1)
# PowerNet-MIB::rPDU2IdentDateOfManufacture.1
Q_MANUFACTURE_DATE = BASE_OID + (2, 1, 7, 1)
# PowerNet-MIB::rPDU2IdentModelNumber.1
Q_MODEL_NUMBER = BASE_OID + (2, 1, 8, 1)
# PowerNet-MIB::rPDU2IdentSerialNumber.1
Q_SERIAL_NUMBER = BASE_OID + (2, 1, 9, 1)
# PowerNet-MIB::rPDU2DevicePropertiesNumOutlets.1
Q_NUM_OUTLETS = BASE_OID + (4, 2, 1, 4, 1)
# PowerNet-MIB::rPDU2DevicePropertiesNumSwitchedOutlets.1
Q_NUM_SWITCHED_OUTLETS = BASE_OID + (4, 2, 1, 5, 1)
# PowerNet-MIB::rPDU2DevicePropertiesNumMeteredOutlets.1
Q_NUM_METERED_OUTLETS = BASE_OID + (4, 2, 1, 6, 1)
# PowerNet-MIB::rPDU2DevicePropertiesMaxCurrentRating.1
Q_MAX_CURRENT_RATING = BASE_OID + (4, 2, 1, 9, 1)
# PowerNet-MIB::rPDU2PhaseStatusVoltage.1
Q_PHASE_VOLTAGE = BASE_OID + (6, 3, 1, 6, 1)
# Dynamic readonly data
# PowerNet-MIB::rPDU2PhaseStatusLoadState.1
Q_PHASE_LOAD_STATE = BASE_OID + (6, 3, 1, 4, 1)
# PowerNet-MIB::rPDU2PhaseStatusCurrent.1
Q_PHASE_CURRENT = BASE_OID + (6, 3, 1, 5, 1)
# PowerNet-MIB::rPDU2DeviceStatusPower.1
Q_POWER = BASE_OID + (4, 3, 1, 5, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusType.1
Q_SENSOR_TYPE = BASE_OID + (10, 2, 2, 1, 5, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusName.1
Q_SENSOR_NAME = BASE_OID + (10, 2, 2, 1, 3, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusCommStatus.1
Q_SENSOR_COMM_STATUS = BASE_OID + (10, 2, 2, 1, 6, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusTempF.1
Q_SENSOR_TEMP_F = BASE_OID + (10, 2, 2, 1, 7, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusTempC.1
Q_SENSOR_TEMP_C = BASE_OID + (10, 2, 2, 1, 8, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusTempStatus.1
Q_SENSOR_TEMP_STATUS = BASE_OID + (10, 2, 2, 1, 9, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusRelativeHumidity.1
Q_SENSOR_HUMIDITY = BASE_OID + (10, 2, 2, 1, 10, 1)
# PowerNet-MIB::rPDU2SensorTempHumidityStatusHumidityStatus.1
Q_SENSOR_HUMIDITY_STATUS = BASE_OID + (10, 2, 2, 1, 11, 1)
# PowerNet-MIB::rPDU2OutletSwitchedStatusName.24
Q_OUTLET_NAME = BASE_OID + (9, 2, 3, 1, 3) # Requires outlet number
# PowerNet-MIB::rPDU2OutletSwitchedStatusState.24
Q_OUTLET_STATUS = BASE_OID + (9, 2, 3, 1, 5) # Requires outlet number
# Dynamic readwrite data
# PowerNet-MIB::rPDU2SensorTempHumidityConfigName.1
Q_SENSOR_NAME_RW = BASE_OID + (10, 2, 1, 1, 3, 1)
# PowerNet-MIB::rPDU2OutletSwitchedConfigName.24
Q_OUTLET_NAME_RW = BASE_OID + (9, 2, 1, 1, 3) # Requires outlet number
# PowerNet-MIB::rPDU2OutletSwitchedControlCommand.24
Q_OUTLET_COMMAND_RW = BASE_OID + (9, 2, 4, 1, 5) # Requires outlet number
# Lookups
LOAD_STATES = ["", "lowLoad", "normal", "nearOverload", "overload"]
SENSOR_TYPES = [
"",
"temperatureOnly",
"temperatureHumidity",
"commsLost",
"notInstalled",
]
COMM_STATUS_TYPES = ["", "notInstalled", "commsOK", "commsLost"]
SENSOR_STATUS_TYPES = [
"",
"notPresent",
"belowMin",
"belowLow",
"normal",
"aboveHigh",
"aboveMax",
]
OUTLET_STATUS_TYPES = ["", "off", "on"]
    def __init__(
        self, hostname_or_ip_address, public_community, private_community
    ):
        """Create an APC object.

        Performs a series of live SNMP reads against the device, so
        construction requires the PDU to be reachable.
        :param hostname_or_ip_address: hostname or ip address of PDU
        :param public_community: public community string (read access)
        :param private_community: private community string (write access)
        :return: APC object
        """
        self._host = hostname_or_ip_address
        self._vendor = "APC"
        # SNMP transport and credentials (SNMP_PORT / SNMP_TIMEOUT /
        # SNMP_RETRIES / SNMP_VERSION_2_2C are class constants defined
        # above this chunk).
        self._transport = cmdgen.UdpTransportTarget(
            (self._host, self.SNMP_PORT),
            timeout=self.SNMP_TIMEOUT,
            retries=self.SNMP_RETRIES,
        )
        self._public = cmdgen.CommunityData(
            public_community, mpModel=self.SNMP_VERSION_2_2C
        )
        self._private = cmdgen.CommunityData(
            private_community, mpModel=self.SNMP_VERSION_2_2C
        )
        # Generic information (static) -- each __get performs a live SNMP
        # read; these values are cached for the lifetime of the object.
        self._identification = self.__get(self.Q_NAME)
        self._location = self.__get(self.Q_LOCATION)
        self._hardware_rev = self.__get(self.Q_HARDWARE_REV)
        self._firmware_rev = self.__get(self.Q_FIRMWARE_REV)
        self._manufacture_date = datetime.strptime(
            str(self.__get(self.Q_MANUFACTURE_DATE)), "%m/%d/%Y"
        )
        self._model_number = self.__get(self.Q_MODEL_NUMBER)
        self._serial_number = self.__get(self.Q_SERIAL_NUMBER)
        # Device status (static)
        self._num_outlets = self.__get(self.Q_NUM_OUTLETS)
        self._num_switched_outlets = self.__get(self.Q_NUM_SWITCHED_OUTLETS)
        self._num_metered_outlets = self.__get(self.Q_NUM_METERED_OUTLETS)
        self._max_current = self.__get(self.Q_MAX_CURRENT_RATING)
        # Phase status (static)
        # Scale divisors: power is reported in hundredths of kW and
        # current in tenths of amps -- presumably per the rPDU2 MIB;
        # TODO confirm against the PowerNet MIB.
        self._power_factor = 100
        self._current_factor = 10
        self._phase_voltage = int(self.__get(self.Q_PHASE_VOLTAGE))
        self._use_centigrade = False
        # Library-style logging: silent unless the application attaches
        # its own handlers.
        self._logger = logging.getLogger(__name__)
        self._logger.addHandler(logging.NullHandler())
    @property
    def host(self):
        """Hostname or IP Address of PDU.
        :return: PDU hostname or ip address (as supplied at construction;
            not re-read from the device)
        """
        self._logger.info("Host: %s", self._host)
        return self._host
    @property
    def vendor(self):
        """Vendor/Manufacturer of PDU.
        :return: PDU vendor/manufacturer (constant ``"APC"``)
        """
        self._logger.info("Vendor: %s", self._vendor)
        return self._vendor
    @property
    def identification(self):
        """Identification string.
        :return: PDU identification (cached at construction time)
        """
        self._logger.info("Identification: %s", self._identification)
        return self._identification
    @property
    def location(self):
        """Location of the PDU.
        :return: PDU location (cached at construction time)
        """
        self._logger.info("Location: %s", self._location)
        return self._location
    @property
    def hardware_revision(self):
        """Hardware revision.
        :return: PDU hardware revision as str (cached at construction time)
        """
        # Convert the cached SNMP value to a plain string for callers.
        revision = str(self._hardware_rev)
        self._logger.info("Hardware revision: %s", revision)
        return revision
    @property
    def firmware_revision(self):
        """Firmware revision.
        :return: PDU firmware revision as str (cached at construction time)
        """
        revision = str(self._firmware_rev)
        self._logger.info("Firmware revision: %s", revision)
        return revision
    @property
    def date_of_manufacture(self):
        """Date of manufacture.
        :return: PDU date of manufacture as a ``datetime`` (parsed from the
            device's ``%m/%d/%Y`` string at construction time)
        """
        self._logger.info(
            "Date of Manufacture: %s", str(self._manufacture_date)
        )
        return self._manufacture_date
    @property
    def model_number(self):
        """Model number.
        :return: PDU model number as str (cached at construction time)
        """
        model = str(self._model_number)
        self._logger.info("Model number: %s", model)
        return model
    @property
    def serial_number(self):
        """Return the serial number.
        :return: PDU serial number as str (cached at construction time)
        """
        serial = str(self._serial_number)
        self._logger.info("Serial number: %s", serial)
        return serial
    @property
    def num_outlets(self):
        """Return the number of outlets in the PDU.
        :return: total number of outlets in the PDU as int (cached)
        """
        num = int(self._num_outlets)
        self._logger.info("Number of outlets: %d", num)
        return num
    @property
    def num_switched_outlets(self):
        """Return the number of switched outlets in the PDU.
        :return: number of switched outlets in the PDU as int (cached)
        """
        num = int(self._num_switched_outlets)
        self._logger.info("Number of switched outlets: %d", num)
        return num
    @property
    def num_metered_outlets(self):
        """Return the number of metered outlets in the PDU.
        :return: number of metered outlets in the PDU as int (cached)
        """
        num = int(self._num_metered_outlets)
        self._logger.info("Number of metered outlets: %d", num)
        return num
    @property
    def max_current(self):
        """Maximum current for the PDU.
        :return: maximum current rating for the PDU in amps (cached)
        """
        current = int(self._max_current)
        self._logger.info("Maximum current: %d", current)
        return current
    @property
    def voltage(self):
        """Line voltage of the PDU.
        :return: PDU line voltage as int (cached at construction time)
        """
        voltage = int(self._phase_voltage)
        self._logger.info("Line voltage: %d", voltage)
        return voltage
    @property
    def load_state(self):
        """Load state of the PDU (read live from the device).
        :return: one of ['lowLoad', 'normal', 'nearOverload', 'overload']
        """
        # SNMP enum is 1-based; LOAD_STATES is padded accordingly.
        state = int(self.__get(self.Q_PHASE_LOAD_STATE))
        self._logger.info("Load state: %s", self.LOAD_STATES[state])
        return self.LOAD_STATES[state]
@property
def current(self):
"""Return the current utilization of the PDU.
:return: current, in amps
"""
current = float(self.__get(self.Q_PHASE_CURRENT) / self._current_factor)
self._logger.info("Current: %.2f", current)
return current
@property
def power(self):
"""Return the power utilization of the PDU.
:return: power, in kW
"""
power = float(self.__get(self.Q_POWER) / self._power_factor)
self._logger.info("Power: %.2f", power)
return power
@property
def is_sensor_present(self):
"""Determine if a sensor is present on the PDU.
:return: Is the sensor present?
"""
state = self.__get(self.Q_SENSOR_TYPE)
present = 1 < int(state) < 3
self._logger.info("Sensor present: %s", str(present))
return present
    @property
    def sensor_name(self):
        """Name of the sensor (read live from the device).
        :return: name of the sensor, or None when no sensor is present
        """
        name = None
        if self.is_sensor_present:
            name = str(self.__get(self.Q_SENSOR_NAME))
        self._logger.info("Sensor name: %s", name)
        return name
@sensor_name.setter
def _set_sensor_name(self, name):
"""Name of the sensor.
:param name: name of the sensor
:return:
"""
if self.is_sensor_present:
self.__set(self.Q_SENSOR_NAME_RW, name)
self._logger.info("Updating sensor name to: %s", name)
    @property
    def sensor_type(self):
        """Type of sensor (read live from the device).
        :return: type of sensor, one of
                 ['temperatureOnly', 'temperatureHumidity', 'commsLost',
                 'notInstalled']
        """
        # Default to 4 ("notInstalled") when no sensor is attached.
        index = 4
        if self.is_sensor_present:
            index = int(self.__get(self.Q_SENSOR_TYPE))
        self._logger.info("Sensor type: %s", self.SENSOR_TYPES[index])
        return self.SENSOR_TYPES[index]
@property
def sensor_comm_status(self):
"""Communication status of the sensor.
:return: communication status of the sensor
"""
index = 1
if self.is_sensor_present:
index = int(self.__get(self.Q_SENSOR_COMM_STATUS))
self._logger.info(
"Sensor communication status: %s", self.SENSOR_STATUS_TYPES[index]
)
return self.SENSOR_STATUS_TYPES[index]
    @property
    def use_centigrade(self):
        """Select between centigrade and fahrenheit.
        :return: True when temperature readings are reported in centigrade
            (defaults to False, i.e. fahrenheit)
        """
        self._logger.info("Use centigrade: %s", str(self._use_centigrade))
        return self._use_centigrade
    @use_centigrade.setter
    def use_centigrade(self, value):
        """Select between centigrade and fahrenheit.
        :param value: use centigrade (True) or fahrenheit (False)
        :return:
        """
        self._logger.info("Updating use centigrade to: %s", value)
        self._use_centigrade = value
@property
def temperature(self):
"""Temperature.
:return: temperature
"""
temp = 0.00
if self.sensor_supports_temperature:
if self._use_centigrade:
temp = float(self.__get(self.Q_SENSOR_TEMP_C) / 10)
else:
temp = float(self.__get(self.Q_SENSOR_TEMP_F) / 10)
self._logger.info("Temperature: %.2f", temp)
return temp
    @property
    def humidity(self):
        """Relative humidity (read live from the device).
        :return: relative humidity, or 0.00 when the sensor does not
            support humidity
        """
        humid = 0.00
        if self.sensor_supports_humidity:
            humid = float(self.__get(self.Q_SENSOR_HUMIDITY))
        self._logger.info("Relative humidity: %.2f", humid)
        return humid
@property
def temperature_status(self):
"""Determine the status of the temperature sensor.
:return: The status of the temperature sensor
"""
index = 1
if self.sensor_supports_temperature:
index = self.__get(self.Q_SENSOR_TEMP_STATUS)
self._logger.info(
"Temperature sensor status: %s", self.SENSOR_STATUS_TYPES[index]
)
return self.SENSOR_STATUS_TYPES[index]
@property
def humidity_status(self):
"""Determine the status of the humidity sensor.
:return: status of the humidity sensor
"""
index = 1
if self.sensor_supports_humidity:
index = self.__get(self.Q_SENSOR_HUMIDITY_STATUS)
self._logger.info(
"Relative humidity sensor status: %s",
self.SENSOR_STATUS_TYPES[index],
)
return self.SENSOR_STATUS_TYPES[index]
    def get_outlet_name(self, outlet):
        """Name of an outlet in the PDU (read live from the device).
        :param outlet: outlet number (1-based)
        :return: name of the outlet
        :raises IndexError: if ``outlet`` is out of range
        """
        if 1 <= outlet <= self._num_outlets:
            name = str(self.__get(self.Q_OUTLET_NAME + (outlet,)))
            self._logger.info("Outlet number %d has name %s", outlet, name)
            return name
        raise IndexError(
            'Only {} outlets exist. "{}" is an invalid outlet.'.format(
                self._num_outlets, str(outlet)
            )
        )
def set_outlet_name(self, outlet, name):
"""Update the name of an outlet in the PDU.
:param outlet: outlet number
:param name: outlet name
:return:
"""
if 1 <= outlet <= self._num_outlets:
self.__set(self.Q_OUTLET_NAME_RW + (outlet,), name)
self._logger.info(
"Updating outlet number %d to new name %s", outlet, name
)
raise IndexError(
'Only {} outlets exist. "{}" is an invalid outlet.'.format(
self._num_outlets, str(outlet)
)
)
def outlet_status(self, outlet):
"""Determine the status of the outlet in the PDU.
:param outlet: outlet number
:return: status of the outlet, one of ['on', 'off']
"""
if 1 <= outlet <= self._num_outlets:
state = self.__get(self.Q_OUTLET_STATUS + (outlet,))
self._logger.info(
"Outlet number %d has status %s",
outlet,
self.OUTLET_STATUS_TYPES[state],
)
return self.OUTLET_STATUS_TYPES[state]
raise IndexError(
'Only {} outlets exist. "{}" is an invalid outlet.'.format(
self._num_outlets, str(outlet)
)
)
    def outlet_command(self, outlet, operation):
        """Send command to an outlet in the PDU.
        Writes the command over SNMP, then polls the outlet until it
        reaches the expected state (or the retry window expires).
        :param outlet: outlet number (1-based)
        :param operation: one of ['on', 'off', 'reboot']
        :return: did the operation complete successfully?
        :raises ValueError: for an unknown operation
        :raises IndexError: for an out-of-range outlet
        """
        valid_operations = ["on", "off", "reboot"]
        if operation not in valid_operations:
            raise ValueError(
                '"{}" is an invalid operation. Valid operations are: {}'.format(
                    str(operation), str(valid_operations)
                )
            )
        # Command values written to rPDU2OutletSwitchedControlCommand --
        # presumably 1=on, 2=off, 3=reboot per the PowerNet MIB; confirm.
        operations = {"on": 1, "off": 2, "reboot": 3}
        if 1 <= outlet <= self._num_outlets:
            self._logger.info(
                "Setting outlet %d to %s state", outlet, operation
            )
            self.__set(
                self.Q_OUTLET_COMMAND_RW + (outlet,), operations[operation]
            )
            try:
                # A reboot ultimately lands in the "on" state.
                if operation in ("on", "reboot"):
                    success = self.__wait_for_state(outlet, "on")
                else:
                    success = self.__wait_for_state(outlet, "off")
            except RetryError:
                # If the operation timed out, no determination of the result
                # can be made.
                success = False
            return success
        raise IndexError(
            'Only {} outlets exist. "{}" is an invalid outlet.'.format(
                self._num_outlets, str(outlet)
            )
        )
    @property
    def sensor_supports_temperature(self):
        """Determine if the sensor supports temperature measurements.
        True for both 'temperatureOnly' and 'temperatureHumidity' types.
        :return: does the sensor support temperature measurements?
        """
        return self.is_sensor_present and "temp" in self.sensor_type.lower()
    @property
    def sensor_supports_humidity(self):
        """Determine if the sensor supports relative humidity measurements.
        True only for the 'temperatureHumidity' sensor type.
        :return: does the sensor support relative humidity measurements?
        """
        return self.is_sensor_present and "humid" in self.sensor_type.lower()
    # pylint: disable=no-self-argument
    # In order to use this method within the @retry decorator, this method
    # must be defined as such.
    def __retry_if_not_state(result):
        """Only keep retrying if the state is not what is expected.
        Used as ``retry_on_result`` below, so it receives the wrapped
        function's return value (a bool), not ``self``.
        :return: negation of input
        """
        return not result
@retry(
stop_max_delay=MAX_STOP_DELAY,
wait_fixed=INTER_RETRY_WAIT,
retry_on_result=__retry_if_not_state,
)
def __wait_for_state(self, outlet, state):
"""Wait until state is hit.
This will wait for MAX_STOP_DELAY with a inter-try delay of
INTER_RETRY_WAIT.
:param outlet: outlet number
:param state: state to wait for
:return: was the state hit?
"""
return self.outlet_status(outlet) is state
    def __get(self, oid):
        """Get a specific value from an OID in the SNMP tree.
        Uses the public (read) community string.
        :param oid: OID to get
        :returns: value from the specified OID (a pysnmp value object)
        :raises RuntimeError: on any SNMP transport/engine error
        """
        (error_indication, _, _, var_binds) = cmdgen.CommandGenerator().getCmd(
            self._public, self._transport, oid
        )
        if error_indication:
            raise RuntimeError(self.ERROR_MSG.format("get", oid, self._host))
        # var_binds is a list of (oid, value) pairs; single-OID query.
        return var_binds[0][1]
    def __set(self, oid, value):
        """Set a specific value to an OID in the SNMP tree.
        Reads the current value first so the new one can be coerced to
        the same SNMP type; uses the private (write) community string.
        :param oid: OID to set
        :param value: value to set
        :returns: the value reported back by the device
        :raises RuntimeError: on any SNMP transport/engine error
        """
        initial_value = self.__get(oid)
        new_value = self.__coerce_value(initial_value, value)
        (error_indication, _, _, var_binds) = cmdgen.CommandGenerator().setCmd(
            self._private, self._transport, (oid, new_value)
        )
        if error_indication:
            raise RuntimeError(self.ERROR_MSG.format("set", oid, self._host))
        return var_binds[0][1]
@staticmethod
def __coerce_value(initial_value, new_value):
"""Coerce the new_value to the same type as the initial_value.
Unfortunately this is a bit of a workaround for the more elegant
version:
`return initial_value.__init__(str(new_value))`
Utilizing that more elegant version yields an SmiError:
MIB object ObjectIdentity((...)) is not OBJECT-TYPE (MIB not loaded?)
:param initial_value: initial value from the device
:param new_value: new value to set, coerced into the right type
:return: new value, coerced into the right type
"""
if isinstance(initial_value, rfc1902.Counter32):
set_value = rfc1902.Counter32(str(new_value))
elif isinstance(initial_value, rfc1902.Counter64):
set_value = rfc1902.Counter64(str(new_value))
elif isinstance(initial_value, rfc1902.Gauge32):
set_value = rfc1902.Gauge32(str(new_value))
elif isinstance(initial_value, rfc1902.Integer):
set_value = rfc1902.Integer(str(new_value))
elif isinstance(initial_value, rfc1902.Integer32):
set_value = rfc1902.Integer32(str(new_value))
elif isinstance(initial_value, rfc1902.IpAddress):
set_value = rfc1902.IpAddress(str(new_value))
elif isinstance(initial_value, rfc1902.OctetString):
set_value = rfc1902.OctetString(str(new_value))
elif isinstance(initial_value, rfc1902.TimeTicks):
set_value = rfc1902.TimeTicks(str(new_value))
elif isinstance(initial_value, rfc1902.Unsigned32):
set_value = rfc1902.Unsigned32(str(new_value))
else:
raise RuntimeError("Unknown type: {}".format(type(initial_value)))
return set_value
| {
"repo_name": "SimplicityGuy/pynoc",
"path": "pynoc/apc.py",
"copies": "1",
"size": "22414",
"license": "apache-2.0",
"hash": -5774310688909020000,
"line_mean": 32.1568047337,
"line_max": 80,
"alpha_frac": 0.5862853574,
"autogenerated": false,
"ratio": 3.6546551443013207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47409405017013206,
"avg_score": null,
"num_lines": null
} |
# APC UPS message
from mMessage import mMessage
import re
from pprint import pprint
class SimpleValue:
    """Wrapper for a status field value that carries no unit label."""
    def __init__(self, raw_value):
        self.value = raw_value
    def __repr__(self):
        return 'SimpleValue(' + repr(self.value) + ')'
    def getValue(self):
        """Return the wrapped value."""
        return self.value
class LabeledValue:
    """Status field value accompanied by a unit label (e.g. '120.0', 'Volts')."""
    def __init__(self, raw_value, unit_label):
        self.value = raw_value
        self.label = unit_label
    def __repr__(self):
        return 'LabeledValue(' + repr(self.value) + ', ' + repr(self.label) + ')'
    def getValue(self):
        """Return the value component."""
        return self.value
    def getLabel(self):
        """Return the unit-label component."""
        return self.label
class mApcMessage(mMessage):
    """An mMessage whose payload is APC UPS (apcupsd-style) status text,
    parsed into named fields on construction."""
    # Class-level default; __init__ always rebinds an instance-level dict.
    fields = {}
    # Fields whose raw value ends in a unit label, e.g. "120.0 Volts".
    labeledFields = ['LINEV', 'LOADPCT', 'BCHARGE', 'TIMELEFT', 'MBATTCHG', \
        'MINTIMEL', 'MAXTIME', 'LOTRANS', 'HITRANS', 'BATTV', 'TONBATT', \
        'CUMONBATT', 'NOMINV', 'NOMBATTV', 'NOMPOWER']
    def __init__(self, m):
        """Copy the envelope of an existing mMessage *m* and parse its
        data payload into self.fields."""
        self.fields = {}
        self.setDigest(m.getDigest())
        self.setEntropy(m.getEntropy())
        self.setCounter(m.getCounter())
        self.setData(m.getData())
        self.extractApcFields()
    def __repr__(self):
        value = "mApcMessage('Digest': " + repr(self.getDigest()) + "\n"
        value += " 'Entropy': " + repr(self.getEntropy()) + "\n"
        value += " 'Counter': " + repr(self.getCounter()) + "\n"
        value += " 'Fields': {\n"
        # items() works on both Python 2 and 3 (iteritems is Py2-only).
        for key, val in self.fields.items():
            value += " '" + key + "': " + repr(val) + "\n"
        value += " }\n"
        value += ')'
        return value
    def extractApcFields(self):
        """Parse 'KEY : value' lines from the payload into self.fields."""
        data = str(self.getData())
        # Split each line into a key and value
        for line in data.splitlines():
            match = re.search(r'^([a-zA-Z0-9\s]*)\s*:\s(.*)$', line)
            # Guard against non-matching lines: re.search returns None
            # there, and the old `match.lastindex` access raised
            # AttributeError instead of skipping the line.
            if match is not None and match.lastindex is not None:
                fieldName = match.group(1).strip()
                rawValue = match.group(2).strip()
                # If the field has a label, make it a LabeledValue
                if fieldName in self.labeledFields:
                    # Check for empty value
                    if rawValue == '':
                        labeledValue = LabeledValue('','')
                    else:
                        # NOTE(review): assumes rawValue contains at least
                        # one space separating value and label -- confirm.
                        valueMatch = re.search(r'^([^\s]*)\s(.*)$', rawValue)
                        labeledValue = LabeledValue(valueMatch.group(1), valueMatch.group(2))
                    self.fields[fieldName] = labeledValue
                # Field does not have a label, make it a SimpleValue
                else:
                    self.fields[fieldName] = SimpleValue(rawValue)
            else:
                #print "Error processing '{0}'".format(line)
                pass
    def getFieldValue(self, fieldName):
        """Return the value of *fieldName*, raising KeyError if absent."""
        if fieldName in self.fields:
            return self.fields[fieldName].getValue()
        else:
            raise KeyError("{0} is not in the available field list".format(fieldName))
    def getFieldLabel(self, fieldName):
        """Return the unit label of *fieldName*, or None for unlabeled
        fields; raises KeyError if the field is absent."""
        if fieldName not in self.fields:
            raise KeyError("{0} is not in the available field list".format(fieldName))
        if fieldName in self.labeledFields:
            return self.fields[fieldName].getLabel()
        else:
            return None
| {
"repo_name": "arobb/multicast-comms",
"path": "mApcMessage.py",
"copies": "1",
"size": "3313",
"license": "mit",
"hash": 5228371414995910000,
"line_mean": 27.5603448276,
"line_max": 93,
"alpha_frac": 0.5342589798,
"autogenerated": false,
"ratio": 3.9346793349168645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9950707900917536,
"avg_score": 0.003646082759865829,
"num_lines": 116
} |
# APDS9960 i2c address
APDS9960_I2C_ADDR = 0x39
# APDS9960 gesture parameters
APDS9960_GESTURE_THRESHOLD_OUT = 10
APDS9960_GESTURE_SENSITIVITY_1 = 50
APDS9960_GESTURE_SENSITIVITY_2 = 20
# APDS9960 device IDs
APDS9960_DEV_ID = [0xab, 0x9c, 0xa8]
# APDS9960 times
APDS9960_TIME_FIFO_PAUSE = 0.03
# APDS9960 register addresses
APDS9960_REG_ENABLE = 0x80
APDS9960_REG_ATIME = 0x81
APDS9960_REG_WTIME = 0x83
APDS9960_REG_AILTL = 0x84
APDS9960_REG_AILTH = 0x85
APDS9960_REG_AIHTL = 0x86
APDS9960_REG_AIHTH = 0x87
APDS9960_REG_PILT = 0x89
APDS9960_REG_PIHT = 0x8b
APDS9960_REG_PERS = 0x8c
APDS9960_REG_CONFIG1 = 0x8d
APDS9960_REG_PPULSE = 0x8e
APDS9960_REG_CONTROL = 0x8f
APDS9960_REG_CONFIG2 = 0x90
APDS9960_REG_ID = 0x92
APDS9960_REG_STATUS = 0x93
APDS9960_REG_CDATAL = 0x94
APDS9960_REG_CDATAH = 0x95
APDS9960_REG_RDATAL = 0x96
APDS9960_REG_RDATAH = 0x97
APDS9960_REG_GDATAL = 0x98
APDS9960_REG_GDATAH = 0x99
APDS9960_REG_BDATAL = 0x9a
APDS9960_REG_BDATAH = 0x9b
APDS9960_REG_PDATA = 0x9c
APDS9960_REG_POFFSET_UR = 0x9d
APDS9960_REG_POFFSET_DL = 0x9e
APDS9960_REG_CONFIG3 = 0x9f
APDS9960_REG_GPENTH = 0xa0
APDS9960_REG_GEXTH = 0xa1
APDS9960_REG_GCONF1 = 0xa2
APDS9960_REG_GCONF2 = 0xa3
# NOTE: the GOFFSET registers are not at consecutive addresses; the
# out-of-order values below appear deliberate -- verify against the
# APDS-9960 datasheet register map before changing.
APDS9960_REG_GOFFSET_U = 0xa4
APDS9960_REG_GOFFSET_D = 0xa5
APDS9960_REG_GOFFSET_L = 0xa7
APDS9960_REG_GOFFSET_R = 0xa9
APDS9960_REG_GPULSE = 0xa6
APDS9960_REG_GCONF3 = 0xaA
APDS9960_REG_GCONF4 = 0xaB
APDS9960_REG_GFLVL = 0xae
APDS9960_REG_GSTATUS = 0xaf
APDS9960_REG_IFORCE = 0xe4
APDS9960_REG_PICLEAR = 0xe5
APDS9960_REG_CICLEAR = 0xe6
APDS9960_REG_AICLEAR = 0xe7
APDS9960_REG_GFIFO_U = 0xfc
APDS9960_REG_GFIFO_D = 0xfd
APDS9960_REG_GFIFO_L = 0xfe
APDS9960_REG_GFIFO_R = 0xff
# APDS9960 bit fields
APDS9960_BIT_PON = 0b00000001
APDS9960_BIT_AEN = 0b00000010
APDS9960_BIT_PEN = 0b00000100
APDS9960_BIT_WEN = 0b00001000
# NOTE(review): "APSD" prefix below is a likely typo for "APDS";
# renaming would break existing callers, so it is left as-is.
APSD9960_BIT_AIEN =0b00010000
APDS9960_BIT_PIEN = 0b00100000
APDS9960_BIT_GEN = 0b01000000
APDS9960_BIT_GVALID = 0b00000001
# APDS9960 modes
APDS9960_MODE_POWER = 0
APDS9960_MODE_AMBIENT_LIGHT = 1
APDS9960_MODE_PROXIMITY = 2
APDS9960_MODE_WAIT = 3
APDS9960_MODE_AMBIENT_LIGHT_INT = 4
APDS9960_MODE_PROXIMITY_INT = 5
APDS9960_MODE_GESTURE = 6
APDS9960_MODE_ALL = 7
# LED Drive values
APDS9960_LED_DRIVE_100MA = 0
APDS9960_LED_DRIVE_50MA = 1
APDS9960_LED_DRIVE_25MA = 2
APDS9960_LED_DRIVE_12_5MA = 3
# Proximity Gain (PGAIN) values
APDS9960_PGAIN_1X = 0
APDS9960_PGAIN_2X = 1
APDS9960_PGAIN_4X = 2
APDS9960_PGAIN_8X = 3
# ALS Gain (AGAIN) values
APDS9960_AGAIN_1X = 0
APDS9960_AGAIN_4X = 1
APDS9960_AGAIN_16X = 2
APDS9960_AGAIN_64X = 3
# Gesture Gain (GGAIN) values
APDS9960_GGAIN_1X = 0
APDS9960_GGAIN_2X = 1
APDS9960_GGAIN_4X = 2
APDS9960_GGAIN_8X = 3
# LED Boost values
APDS9960_LED_BOOST_100 = 0
APDS9960_LED_BOOST_150 = 1
APDS9960_LED_BOOST_200 = 2
APDS9960_LED_BOOST_300 = 3
# Gesture wait time values
APDS9960_GWTIME_0MS = 0
APDS9960_GWTIME_2_8MS = 1
APDS9960_GWTIME_5_6MS = 2
APDS9960_GWTIME_8_4MS = 3
APDS9960_GWTIME_14_0MS = 4
APDS9960_GWTIME_22_4MS = 5
APDS9960_GWTIME_30_8MS = 6
APDS9960_GWTIME_39_2MS = 7
# Default values
APDS9960_DEFAULT_ATIME = 219 # 103ms
APDS9960_DEFAULT_WTIME = 246 # 27ms
APDS9960_DEFAULT_PROX_PPULSE = 0x87 # 16us, 8 pulses
APDS9960_DEFAULT_GESTURE_PPULSE = 0x89 # 16us, 10 pulses
APDS9960_DEFAULT_POFFSET_UR = 0 # 0 offset
APDS9960_DEFAULT_POFFSET_DL = 0 # 0 offset
APDS9960_DEFAULT_CONFIG1 = 0x60 # No 12x wait (WTIME) factor
APDS9960_DEFAULT_LDRIVE = APDS9960_LED_DRIVE_100MA
APDS9960_DEFAULT_PGAIN = APDS9960_PGAIN_4X
APDS9960_DEFAULT_AGAIN = APDS9960_AGAIN_4X
APDS9960_DEFAULT_PILT = 0 # Low proximity threshold
APDS9960_DEFAULT_PIHT = 50 # High proximity threshold
APDS9960_DEFAULT_AILT = 0xffff # Force interrupt for calibration
APDS9960_DEFAULT_AIHT = 0
APDS9960_DEFAULT_PERS = 0x11 # 2 consecutive prox or ALS for int.
APDS9960_DEFAULT_CONFIG2 = 0x01 # No saturation interrupts or LED boost
APDS9960_DEFAULT_CONFIG3 = 0 # Enable all photodiodes, no SAI
APDS9960_DEFAULT_GPENTH = 40 # Threshold for entering gesture mode
APDS9960_DEFAULT_GEXTH = 30 # Threshold for exiting gesture mode
APDS9960_DEFAULT_GCONF1 = 0x40 # 4 gesture events for int., 1 for exit
APDS9960_DEFAULT_GGAIN = APDS9960_GGAIN_4X
APDS9960_DEFAULT_GLDRIVE = APDS9960_LED_DRIVE_100MA
APDS9960_DEFAULT_GWTIME = APDS9960_GWTIME_2_8MS
APDS9960_DEFAULT_GOFFSET = 0 # No offset scaling for gesture mode
APDS9960_DEFAULT_GPULSE = 0xc9 # 32us, 10 pulses
APDS9960_DEFAULT_GCONF3 = 0 # All photodiodes active during gesture
APDS9960_DEFAULT_GIEN = 0 # Disable gesture interrupts
# gesture directions
APDS9960_DIR_NONE = 0
APDS9960_DIR_LEFT = 1
APDS9960_DIR_RIGHT = 2
APDS9960_DIR_UP = 3
APDS9960_DIR_DOWN = 4
APDS9960_DIR_NEAR = 5
APDS9960_DIR_FAR = 6
APDS9960_DIR_ALL = 7
# state definitions
APDS9960_STATE_NA = 0
APDS9960_STATE_NEAR = 1
APDS9960_STATE_FAR = 2
APDS9960_STATE_ALL = 3
| {
"repo_name": "psby233/pibot",
"path": "src/apds9960/const.py",
"copies": "1",
"size": "5329",
"license": "mit",
"hash": 691456521780458900,
"line_mean": 30.5325443787,
"line_max": 97,
"alpha_frac": 0.6920623006,
"autogenerated": false,
"ratio": 2.2773504273504273,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3469412727950427,
"avg_score": null,
"num_lines": null
} |
""" a peer's vote """
from collections import namedtuple
import json
class Vote(namedtuple('Vote', 'myid mystate proposed_id zxid')):
    """
    Used in a FastLeaderElection to propose a leader (or to broadcast the current one).
    Note: zxid refer's to the (proposed or current) leader's zxid
    """
    def __str__(self):
        # JSON form is the wire/parse format; see parse() below.
        return json.dumps(self._asdict())
    def __gt__(self, other):
        """
        Bigger zxid wins. if that's tied, smaller id breaks the tie.
        """
        if self.zxid > other.zxid:
            return True
        if self.zxid == other.zxid and self.proposed_id < other.proposed_id:
            return True
        return False
    def __lt__(self, other):
        # Delegate to __gt__ so the two orderings cannot drift apart.
        return other > self
    @classmethod
    def parse(cls, vstr):
        """
        Given a valid JSON repr of a Vote, returns the corresponding Vote object.
        zxid may be a JSON integer or a hex string (e.g. "ff" -> 255).
        :raises ValueError/KeyError: on malformed input
        """
        vd = json.loads(vstr)
        return cls(
            int(vd["myid"]),
            int(vd["mystate"]),
            int(vd["proposed_id"]),
            # isinstance is the idiomatic type test (type(x) == int also
            # rejects int subclasses such as bool-free custom ints).
            vd["zxid"] if isinstance(vd["zxid"], int) else int(vd["zxid"], 16)
        )
| {
"repo_name": "rgs1/pyzab",
"path": "pyzab/vote.py",
"copies": "1",
"size": "1117",
"license": "apache-2.0",
"hash": -6367396478988713000,
"line_mean": 26.925,
"line_max": 87,
"alpha_frac": 0.5550581916,
"autogenerated": false,
"ratio": 3.7993197278911564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4854377919491156,
"avg_score": null,
"num_lines": null
} |
"""A Pelican plugin which minifies HTML pages."""
from logging import getLogger
from os import walk
from os.path import join
from htmlmin import minify
from pelican import signals
from joblib import Parallel, delayed
# We need save unicode strings to files.
try:
from codecs import open
except ImportError:
pass
logger = getLogger(__name__)
def minify_html(pelican):
    """Minify every generated HTML file under the output directory.
    :param pelican: The Pelican instance.
    """
    options = pelican.settings.get('MINIFY', {})
    targets = []
    for dirpath, _, filenames in walk(pelican.settings['OUTPUT_PATH']):
        targets.extend(
            join(dirpath, name)
            for name in filenames
            if name.endswith(('.html', '.htm'))
        )
    # Minify files in parallel across all available cores.
    Parallel(n_jobs=-1)(
        delayed(create_minified_file)(path, options) for path in targets
    )
def create_minified_file(filename, options):
    """Create a minified HTML file, overwriting the original.
    :param str filename: The file to minify.
    :param dict options: keyword options forwarded to ``htmlmin.minify``.
    """
    with open(filename, encoding='utf-8') as f:
        uncompressed = f.read()
    try:
        logger.debug('Minifying: %s' % filename)
        compressed = minify(uncompressed, **options)
    except Exception as ex:
        # Leave the original file untouched on failure.  The previous
        # implementation opened the file for writing (truncating it)
        # *before* minifying, so any minification error emptied the page.
        logger.critical('HTML Minification failed: %s' % ex)
        return
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(compressed)
def register():
    """Plugin entry point: run the HTML minification stuff after all
    articles have been generated, at the very end of the processing loop
    (Pelican's ``finalized`` signal).
    """
    signals.finalized.connect(minify_html)
| {
"repo_name": "rdegges/pelican-minify",
"path": "minify.py",
"copies": "1",
"size": "1598",
"license": "unlicense",
"hash": -1651856778887837200,
"line_mean": 27.0350877193,
"line_max": 121,
"alpha_frac": 0.6670838548,
"autogenerated": false,
"ratio": 3.9166666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5083750521466667,
"avg_score": null,
"num_lines": null
} |
"""A PEP 517 interface to setuptools
Previously, when a user or a command line tool (let's call it a "frontend")
needed to make a request of setuptools to take a certain action, for
example, generating a list of installation requirements, the frontend would
would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
PEP 517 defines a different method of interfacing with setuptools. Rather
than calling "setup.py" directly, the frontend should:
1. Set the current directory to the directory with a setup.py file
2. Import this module into a safe python interpreter (one in which
setuptools can potentially set global variables or crash hard).
3. Call one of the functions defined in PEP 517.
What each function does is defined in PEP 517. However, here is a "casual"
definition of the functions (this definition should not be relied on for
bug reports or API stability):
- `build_wheel`: build a wheel in the folder and return the basename
- `get_requires_for_build_wheel`: get the `setup_requires` to build
- `prepare_metadata_for_build_wheel`: get the `install_requires`
- `build_sdist`: build an sdist in the folder and return the basename
- `get_requires_for_build_sdist`: get the `setup_requires` to build
Again, this is not a formal definition! Just a "taste" of the module.
"""
import io
import os
import sys
import tokenize
import shutil
import contextlib
import setuptools
import distutils
from pkg_resources import parse_requirements
__all__ = ['get_requires_for_build_sdist',
'get_requires_for_build_wheel',
'prepare_metadata_for_build_wheel',
'build_wheel',
'build_sdist',
'__legacy__',
'SetupRequirementsError']
class SetupRequirementsError(BaseException):
    """Raised to abort setup() and surface ``setup_requires`` specifiers
    to the build backend (derives from BaseException so setup.py's own
    ``except Exception`` handlers cannot swallow it)."""
    def __init__(self, specifiers):
        # List of requirement specifier strings, e.g. ['wheel>=0.32'].
        self.specifiers = specifiers
class Distribution(setuptools.dist.Distribution):
    """Distribution subclass that intercepts ``setup_requires`` handling:
    instead of downloading build eggs it raises SetupRequirementsError so
    the backend can report the requirements per PEP 517."""
    def fetch_build_eggs(self, specifiers):
        # Normalize the specifiers and abort setup() with them attached.
        specifier_list = list(map(str, parse_requirements(specifiers)))

        raise SetupRequirementsError(specifier_list)

    @classmethod
    @contextlib.contextmanager
    def patch(cls):
        """
        Replace
        distutils.dist.Distribution with this class
        for the duration of this context.
        """
        orig = distutils.core.Distribution
        distutils.core.Distribution = cls
        try:
            yield
        finally:
            # Always restore the original class, even if setup() raised.
            distutils.core.Distribution = orig
def _to_str(s):
    """
    Convert a filename to a string (on Python 2, explicitly
    a byte string, not Unicode) as distutils checks for the
    exact type str.
    """
    # Python 3, or an already-native str on Python 2: nothing to do.
    if sys.version_info[0] != 2 or isinstance(s, str):
        return s
    # Assume it's Unicode, as that's what the PEP says
    # should be provided.
    return s.encode(sys.getfilesystemencoding())
def _get_immediate_subdirectories(a_dir):
    """Return the names (not paths) of the direct subdirectories of *a_dir*."""
    subdirs = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(entry)
    return subdirs
def _file_with_extension(directory, extension):
    """Return the single filename in *directory* ending with *extension*.
    Raises ValueError when zero or multiple files match (via unpacking)."""
    candidates = [
        name for name in os.listdir(directory)
        if name.endswith(extension)
    ]
    # Tuple unpacking enforces exactly one match, as before.
    (match,) = candidates
    return match
def _open_setup_script(setup_script):
    """Open *setup_script* for reading; when it does not exist, return an
    in-memory default that simply calls setup()."""
    if os.path.exists(setup_script):
        # tokenize.open honors PEP 263 encoding declarations (Python 3);
        # fall back to plain open where it is unavailable.
        return getattr(tokenize, 'open', open)(setup_script)
    # Supply a default setup.py
    return io.StringIO(u"from setuptools import setup; setup()")
class _BuildMetaBackend(object):
    """PEP 517 build backend implemented by driving setup.py through
    manipulated ``sys.argv``."""

    def _fix_config(self, config_settings):
        # Normalize config_settings so '--global-option' is always present.
        config_settings = config_settings or {}
        config_settings.setdefault('--global-option', [])
        return config_settings

    def _get_build_requires(self, config_settings, requirements):
        """Run 'setup.py egg_info' under the patched Distribution and
        append any setup_requires specifiers it surfaces to *requirements*."""
        config_settings = self._fix_config(config_settings)

        sys.argv = sys.argv[:1] + ['egg_info'] + \
            config_settings["--global-option"]
        try:
            with Distribution.patch():
                self.run_setup()
        except SetupRequirementsError as e:
            # Raised by Distribution.fetch_build_eggs in lieu of downloads.
            requirements += e.specifiers

        return requirements

    def run_setup(self, setup_script='setup.py'):
        """Execute the project's setup script in this interpreter."""
        # Note that we can reuse our build directory between calls
        # Correctness comes first, then optimization later
        __file__ = setup_script
        __name__ = '__main__'

        with _open_setup_script(__file__) as f:
            code = f.read().replace(r'\r\n', r'\n')

        exec(compile(code, __file__, 'exec'), locals())

    def get_requires_for_build_wheel(self, config_settings=None):
        config_settings = self._fix_config(config_settings)
        # 'wheel' is always needed to run bdist_wheel.
        return self._get_build_requires(config_settings, requirements=['wheel'])

    def get_requires_for_build_sdist(self, config_settings=None):
        config_settings = self._fix_config(config_settings)
        return self._get_build_requires(config_settings, requirements=[])

    def prepare_metadata_for_build_wheel(self, metadata_directory,
                                         config_settings=None):
        """Generate a .dist-info directory inside *metadata_directory* and
        return its basename."""
        sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',
                                   _to_str(metadata_directory)]
        self.run_setup()

        # dist_info may nest the .dist-info inside a single subdirectory;
        # descend until it is found.
        dist_info_directory = metadata_directory
        while True:
            dist_infos = [f for f in os.listdir(dist_info_directory)
                          if f.endswith('.dist-info')]

            if (len(dist_infos) == 0 and
                len(_get_immediate_subdirectories(dist_info_directory)) == 1):

                dist_info_directory = os.path.join(
                    dist_info_directory, os.listdir(dist_info_directory)[0])
                continue

            assert len(dist_infos) == 1
            break

        # PEP 517 requires that the .dist-info directory be placed in the
        # metadata_directory. To comply, we MUST copy the directory to the root
        if dist_info_directory != metadata_directory:
            shutil.move(
                os.path.join(dist_info_directory, dist_infos[0]),
                metadata_directory)
            shutil.rmtree(dist_info_directory, ignore_errors=True)

        return dist_infos[0]

    def build_wheel(self, wheel_directory, config_settings=None,
                    metadata_directory=None):
        """Build a wheel into *wheel_directory* and return its basename.
        NOTE: when *wheel_directory* is not 'dist', its previous contents
        are removed and replaced with setup.py's 'dist' output."""
        config_settings = self._fix_config(config_settings)
        wheel_directory = os.path.abspath(wheel_directory)
        sys.argv = sys.argv[:1] + ['bdist_wheel'] + \
            config_settings["--global-option"]
        self.run_setup()
        if wheel_directory != 'dist':
            shutil.rmtree(wheel_directory)
            shutil.copytree('dist', wheel_directory)

        return _file_with_extension(wheel_directory, '.whl')

    def build_sdist(self, sdist_directory, config_settings=None):
        """Build a gztar sdist into *sdist_directory*, returning its basename."""
        config_settings = self._fix_config(config_settings)
        sdist_directory = os.path.abspath(sdist_directory)
        sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \
            config_settings["--global-option"] + \
            ["--dist-dir", sdist_directory]
        self.run_setup()

        return _file_with_extension(sdist_directory, '.tar.gz')
class _BuildMetaLegacyBackend(_BuildMetaBackend):
    """Compatibility backend for setuptools

    This is a version of setuptools.build_meta that endeavors to maintain backwards
    compatibility with pre-PEP 517 modes of invocation. It exists as a temporary
    bridge between the old packaging mechanism and the new packaging mechanism,
    and will eventually be removed.
    """
    def run_setup(self, setup_script='setup.py'):
        """Run setup.py with the script's directory injected into
        sys.path, as direct 'python setup.py ...' invocation would have."""
        # In order to maintain compatibility with scripts assuming that
        # the setup.py script is in a directory on the PYTHONPATH, inject
        # '' into sys.path. (pypa/setuptools#1642)
        sys_path = list(sys.path)           # Save the original path

        script_dir = os.path.dirname(os.path.abspath(setup_script))
        if script_dir not in sys.path:
            sys.path.insert(0, script_dir)

        try:
            super(_BuildMetaLegacyBackend,
                  self).run_setup(setup_script=setup_script)
        finally:
            # While PEP 517 frontends should be calling each hook in a fresh
            # subprocess according to the standard (and thus it should not be
            # strictly necessary to restore the old sys.path), we'll restore
            # the original path so that the path manipulation does not persist
            # within the hook after run_setup is called.
            sys.path[:] = sys_path
# The primary backend: one module-level instance whose bound methods are
# re-exported below under the exact callable names PEP 517 requires a
# build backend module to expose.
_BACKEND = _BuildMetaBackend()

get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
build_wheel = _BACKEND.build_wheel
build_sdist = _BACKEND.build_sdist

# The legacy backend, selected via "setuptools.build_meta:__legacy__",
# tolerates pre-PEP 517 setup.py assumptions (see _BuildMetaLegacyBackend).
__legacy__ = _BuildMetaLegacyBackend()
| {
"repo_name": "lmregus/Portfolio",
"path": "python/design_patterns/env/lib/python3.7/site-packages/setuptools/build_meta.py",
"copies": "3",
"size": "8911",
"license": "mit",
"hash": 3735676748830441500,
"line_mean": 35.3714285714,
"line_max": 83,
"alpha_frac": 0.6460554371,
"autogenerated": false,
"ratio": 4.098896044158233,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000423406671493493,
"num_lines": 245
} |
# A **Percolator** is a reverse query, much like a match rule, which is run whenever a new feed is added. These can be used to create alerts by causing the sensit to publish the feed that was just added. A percolator query is defined by a `name` and a valid `query` according to the [elasticsearch Query DSL](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html). For more information about Percolator queries please refer to the [elasticsearch percolator documentation](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-percolate.html).
#
# topic_id - The key for the parent topic
# id - The name of the percolator query
class Percolator(object):
    """API wrapper for a Topic's percolator (reverse-query) resources.

    A percolator is a reverse query run whenever a new feed is added,
    e.g. to trigger alerts.  It is addressed by its parent topic and its
    name; the `query` follows the elasticsearch Query DSL.

    topic_id - The key for the parent topic
    id       - The name of the percolator query
    client   - HTTP client exposing get/post/put/delete(path, body, options)
    """

    def __init__(self, topic_id, id, client):
        self.topic_id = topic_id
        self.id = id
        self.client = client

    # '/api/topics/:topic_id/percolators' GET
    def list(self, options=None):
        """Return the list of percolators for the topic.

        Requires authorization of **read_any_percolators**, or
        **read_application_percolators**.
        """
        # Default was a shared mutable `{}`; use None to avoid the
        # mutable-default-argument pitfall.  Behavior is unchanged.
        options = {} if options is None else options
        body = options['query'] if 'query' in options else {}
        return self.client.get(
            '/api/topics/' + self.topic_id + '/percolators', body, options)

    # '/api/topics/:topic_id/percolators/:id' GET
    def find(self, options=None):
        """Return this specific percolator of the associated Topic by id.

        Requires authorization of **read_any_percolators**, or
        **read_application_percolators**.
        """
        options = {} if options is None else options
        body = options['query'] if 'query' in options else {}
        return self.client.get(
            '/api/topics/' + self.topic_id + '/percolators/' + self.id,
            body, options)

    # '/api/topics/:topic_id/percolators' POST
    def create(self, percolator, options=None):
        """Create a percolator on the associated Topic.

        percolator - dict with `name` (required) and `query` (a hash
                     following the elasticsearch Query DSL).

        Requires authorization of **manage_any_percolators**, or
        **manage_application_percolators**.
        """
        options = {} if options is None else options
        body = options['body'] if 'body' in options else {}
        body['percolator'] = percolator
        return self.client.post(
            '/api/topics/' + self.topic_id + '/percolators', body, options)

    # '/api/topics/:topic_id/percolators/:id' PUT
    def update(self, percolator, options=None):
        """Update the query for this percolator.

        percolator - dict containing the `query` hash following the
                     elasticsearch Query DSL.

        Requires authorization of **manage_any_percolators**, or
        **manage_application_percolators**.
        """
        options = {} if options is None else options
        body = options['body'] if 'body' in options else {}
        body['percolator'] = percolator
        return self.client.put(
            '/api/topics/' + self.topic_id + '/percolators/' + self.id,
            body, options)

    # '/api/topics/:topic_id/percolators/:id' DELETE
    def delete(self, options=None):
        """Delete this percolator on the associated topic.

        Requires authorization of **manage_any_percolators**, or
        **manage_application_percolators**.
        """
        options = {} if options is None else options
        body = options['body'] if 'body' in options else {}
        return self.client.delete(
            '/api/topics/' + self.topic_id + '/percolators/' + self.id,
            body, options)
| {
"repo_name": "cwadding/sensit-python",
"path": "sensit/api/percolator.py",
"copies": "1",
"size": "3422",
"license": "mit",
"hash": -8282176540384180000,
"line_mean": 51.6461538462,
"line_max": 615,
"alpha_frac": 0.7244301578,
"autogenerated": false,
"ratio": 3.432296890672016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46567270484720164,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.