repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
rbarrois/confutils | confutils/configfile.py | ConfigFile._get_section | python | def _get_section(self, name, create=True):
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section | Retrieve a section by name. Create it on first access. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L339-L349 | null | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.get_line | python | def get_line(self, section, line):
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line) | Retrieve all lines compatible with a given line. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L358-L364 | [
"def _get_section(self, name, create=True):\n \"\"\"Retrieve a section by name. Create it on first access.\"\"\"\n try:\n return self.sections[name]\n except KeyError:\n if not create:\n raise\n\n section = Section(name)\n self.sections[name] = section\n return section\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.iter_lines | python | def iter_lines(self, section):
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line | Iterate over all lines in a section.
This will skip 'header' lines. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L366-L378 | [
"def _get_section(self, name, create=True):\n \"\"\"Retrieve a section by name. Create it on first access.\"\"\"\n try:\n return self.sections[name]\n except KeyError:\n if not create:\n raise\n\n section = Section(name)\n self.sections[name] = section\n return section\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.enter_block | python | def enter_block(self, name):
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block | Mark 'entering a block'. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L383-L388 | [
"def new_block(self, **kwargs):\n block = SectionBlock(self.name, **kwargs)\n self.blocks.append(block)\n return block\n",
"def _get_section(self, name, create=True):\n \"\"\"Retrieve a section by name. Create it on first access.\"\"\"\n try:\n return self.sections[name]\n except KeyError:\n if not create:\n raise\n\n section = Section(name)\n self.sections[name] = section\n return section\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.insert_line | python | def insert_line(self, line):
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line) | Insert a new line | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L390-L395 | [
"def append(self, line):\n self.lines.append(line)\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.handle_line | python | def handle_line(self, line):
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line) | Read one line. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L397-L402 | [
"def enter_block(self, name):\n \"\"\"Mark 'entering a block'.\"\"\"\n section = self._get_section(name)\n block = self.current_block = section.new_block()\n self.blocks.append(block)\n return block\n",
"def insert_line(self, line):\n \"\"\"Insert a new line\"\"\"\n if self.current_block is not None:\n self.current_block.append(line)\n else:\n self.header.append(line)\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.parse | python | def parse(self, fileobj, name_hint='', parser=None):
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line) | Fill from a file-like object. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L404-L409 | [
"def handle_line(self, line):\n \"\"\"Read one line.\"\"\"\n if line.kind == ConfigLine.KIND_HEADER:\n self.enter_block(line.header)\n else:\n self.insert_line(line)\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.update_line | python | def update_line(self, section, old_line, new_line, once=False):
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once) | Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L434-L446 | [
"def _get_section(self, name, create=True):\n \"\"\"Retrieve a section by name. Create it on first access.\"\"\"\n try:\n return self.sections[name]\n except KeyError:\n if not create:\n raise\n\n section = Section(name)\n self.sections[name] = section\n return section\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.remove_line | python | def remove_line(self, section, line):
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line) | Remove all instances of a line.
Returns:
int: the number of lines removed | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L448-L460 | [
"def _get_section(self, name, create=True):\n \"\"\"Retrieve a section by name. Create it on first access.\"\"\"\n try:\n return self.sections[name]\n except KeyError:\n if not create:\n raise\n\n section = Section(name)\n self.sections[name] = section\n return section\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.items | python | def items(self, section):
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value | Retrieve all key/value pairs for a given section. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L468-L472 | [
"def iter_lines(self, section):\n \"\"\"Iterate over all lines in a section.\n\n This will skip 'header' lines.\n \"\"\"\n try:\n section = self._get_section(section, create=False)\n except KeyError:\n return\n\n for block in section:\n for line in block:\n yield line\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.get | python | def get(self, section, key):
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value | Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L474-L482 | [
"def get_line(self, section, line):\n \"\"\"Retrieve all lines compatible with a given line.\"\"\"\n try:\n section = self._get_section(section, create=False)\n except KeyError:\n return []\n return section.find_lines(line)\n",
"def _make_line(self, key, value=None):\n return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.get_one | python | def get_one(self, section, key):
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section)) | Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L484-L494 | [
"def get(self, section, key):\n \"\"\"Return the 'value' of all lines matching the section/key.\n\n Yields:\n values for matching lines.\n \"\"\"\n line = self._make_line(key)\n for line in self.get_line(section, line):\n yield line.value\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def add_or_update(self, section, key, value):
"""Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines.
"""
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/configfile.py | ConfigFile.add_or_update | python | def add_or_update(self, section, key, value):
updates = self.update(section, key, value)
if updates == 0:
self.add(section, key, value)
return updates | Update the key or, if no previous value existed, add it.
Returns:
int: Number of updated lines. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L500-L509 | [
"def add(self, section, key, value):\n line = self._make_line(key, value)\n return self.add_line(section, line)\n",
"def update(self, section, key, new_value, old_value=None, once=False):\n old_line = self._make_line(key, old_value)\n new_line = self._make_line(key, new_value)\n return self.update_line(section, old_line, new_line, once=once)\n"
] | class ConfigFile(object):
"""A (hopefully writable) config file.
Attributes:
sections (dict(name => Section)): sections of the file
blocks (SectionBlock list): blocks from the file
header (ConfigLineList): list of lines before the first section
current_block (SectionBlock): current block being read
"""
def __init__(self):
self.sections = dict()
self.blocks = []
self.header = ConfigLineList()
self.current_block = None
def _get_section(self, name, create=True):
"""Retrieve a section by name. Create it on first access."""
try:
return self.sections[name]
except KeyError:
if not create:
raise
section = Section(name)
self.sections[name] = section
return section
def __contains__(self, name):
"""Check whether a given name is a known section."""
return name in self.sections
# Accessing values
# ================
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line)
def iter_lines(self, section):
"""Iterate over all lines in a section.
This will skip 'header' lines.
"""
try:
section = self._get_section(section, create=False)
except KeyError:
return
for block in section:
for line in block:
yield line
# Filling from lines
# ==================
def enter_block(self, name):
"""Mark 'entering a block'."""
section = self._get_section(name)
block = self.current_block = section.new_block()
self.blocks.append(block)
return block
def insert_line(self, line):
"""Insert a new line"""
if self.current_block is not None:
self.current_block.append(line)
else:
self.header.append(line)
def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line)
def parse(self, fileobj, name_hint='', parser=None):
"""Fill from a file-like object."""
self.current_block = None # Reset current block
parser = parser or Parser()
for line in parser.parse(fileobj, name_hint=name_hint):
self.handle_line(line)
def parse_file(self, filename, skip_unreadable=False, **kwargs):
"""Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError.
"""
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename, **kwargs)
# Updating config content
# =======================
def add_line(self, section, line):
"""Insert a new line within a section.
Returns the SectionBlock containing that new line.
"""
return self._get_section(section).insert(line)
def update_line(self, section, old_line, new_line, once=False):
"""Replace all lines matching `old_line` with `new_line`.
If ``once`` is set to True, remove only the first instance.
Returns:
int: the number of updates performed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
return 0
return s.update(old_line, new_line, once=once)
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
# High-level API
# ==============
def _make_line(self, key, value=None):
return ConfigLine(ConfigLine.KIND_DATA, key=key, value=value)
def items(self, section):
"""Retrieve all key/value pairs for a given section."""
for line in self.iter_lines(section):
if line.kind == ConfigLine.KIND_DATA:
yield line.key, line.value
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value
def get_one(self, section, key):
"""Retrieve the first value for a section/key.
Raises:
KeyError: If no line match the given section/key.
"""
lines = iter(self.get(section, key))
try:
return next(lines)
except StopIteration:
raise KeyError("Key %s not found in %s" % (key, section))
def add(self, section, key, value):
line = self._make_line(key, value)
return self.add_line(section, line)
def update(self, section, key, new_value, old_value=None, once=False):
old_line = self._make_line(key, old_value)
new_line = self._make_line(key, new_value)
return self.update_line(section, old_line, new_line, once=once)
def remove(self, section, key, value=None):
line = self._make_line(key, value)
return self.remove_line(section, line)
# Views
# =====
def section_view(self, section, multi_value=False):
view_class = MultiValuedSectionView if multi_value else SingleValuedSectionView
return view_class(self, section)
# Regenerating file
# =================
def __iter__(self):
# First, the header
for line in self.header:
yield line
# Then the content of blocks
for block in self.blocks:
if not block:
# Empty, skip
continue
yield block.header_line()
for line in block:
yield line
# Finally, extra block lines
for section in self.sections.values():
if section.extra_block:
yield section.extra_block.header_line()
for line in section.extra_block:
yield line
def write(self, fd):
"""Write to an open file-like object."""
for line in self:
fd.write('%s\n' % line.text)
|
rbarrois/confutils | confutils/merged_config.py | MergedConfig.get | python | def get(self, key, default=NoDefault):
key = normalize_key(key)
if default is NoDefault:
defaults = []
else:
defaults = [default]
for options in self.options:
try:
value = options[key]
except KeyError:
continue
if isinstance(value, Default):
defaults.append(value.value)
continue
else:
return value
if defaults:
return defaults[0]
return NoDefault | Retrieve a value from its key.
Retrieval steps are:
1) Normalize the key
2) For each option group:
a) Retrieve the value at that key
b) If no value exists, continue
c) If the value is an instance of 'Default', continue
d) Otherwise, return the value
3) If no option had a non-default value for the key, return the
first Default() option for the key (or :arg:`default`). | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/merged_config.py#L92-L126 | [
"def normalize_key(key):\n \"\"\"Normalize a config key.\n\n Returns the same key, with only lower-case characters and no '-'.\n \"\"\"\n return key.lower().replace('-', '_')\n"
] | class MergedConfig(object):
"""A merged configuration holder.
Merges options from a set of dicts."""
def __init__(self, *options, **kwargs):
self.options = []
for option in options:
self.add_options(option)
def add_options(self, options, normalize=True):
if normalize:
options = NormalizedDict(options)
self.options.append(options)
def __repr__(self): # pragma: no cover
return '%s(%r)' % (self.__class__.__name__, self.options)
|
rbarrois/confutils | confutils/configreader.py | ConfigReader.parse_file | python | def parse_file(self, filename, skip_unreadable=False):
if not os.access(filename, os.R_OK):
if skip_unreadable:
return
raise ConfigReadingError("Unable to open file %s." % filename)
with open(filename, 'rt') as f:
return self.parse(f, name_hint=filename) | Parse a file from its name (instead of fds).
If skip_unreadable is False and the file can't be read, will raise a
ConfigReadingError. | train | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configreader.py#L127-L138 | [
"def parse(self, f, name_hint=''):\n self.enter_section('core')\n\n for lineno, line in enumerate(f):\n line = line.strip()\n if self.re_section_header.match(line):\n section_name = line[1:-1]\n self.enter_section(section_name)\n elif self.re_blank_line.match(line):\n continue\n else:\n match = self.re_normal_line.match(line)\n if not match:\n raise ConfigSyntaxError(\"Invalid line %r at %s:%d\" % (\n line, name_hint or f, lineno))\n\n key, value = match.groups()\n self.current_section[key.strip()] = value.strip()\n"
] | class ConfigReader(object):
re_section_header = re.compile(r'^\[[\w._-]+\]$')
re_blank_line = re.compile(r'^(#.*)?$')
re_normal_line = re.compile(r'^([^:=]+)[:=](.*)$')
def __init__(self, multi_valued_sections=()):
self.sections = {}
self.multi_valued_sections = multi_valued_sections
self.current_section = self['core']
def __getitem__(self, section_name):
try:
return self.sections[section_name]
except KeyError:
if section_name in self.multi_valued_sections:
section = MultiValuedSection(section_name)
else:
section = SingleValuedSection(section_name)
self.sections[section_name] = section
return section
def __iter__(self):
return iter(self.sections)
def enter_section(self, name):
self.current_section = self[name]
return self.current_section
def parse(self, f, name_hint=''):
self.enter_section('core')
for lineno, line in enumerate(f):
line = line.strip()
if self.re_section_header.match(line):
section_name = line[1:-1]
self.enter_section(section_name)
elif self.re_blank_line.match(line):
continue
else:
match = self.re_normal_line.match(line)
if not match:
raise ConfigSyntaxError("Invalid line %r at %s:%d" % (
line, name_hint or f, lineno))
key, value = match.groups()
self.current_section[key.strip()] = value.strip()
def __repr__(self):
return '<%s: %r>' % (self.__class__.__name__, self.sections)
|
20tab/twentytab-tree | tree/menu.py | Menu.as_ul | python | def as_ul(self, current_linkable=False, class_current="active_link",
before_1="", after_1="", before_all="", after_all=""):
return self.__do_menu("as_ul", current_linkable, class_current,
before_1=before_1, after_1=after_1, before_all=before_all, after_all=after_all) | It returns menu as ul | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L68-L74 | [
"def __do_menu(self, menu_as, current_linkable=False, class_current=\"\",\n chars=\"\", before_1=\"\", after_1=\"\", before_all=\"\",\n after_all=\"\", render=True):\n nodes = self.root.get_descendants()\n list_nodes = prepare_nodes(list(nodes), self.request)\n\n if not render:\n return list_nodes\n\n if self.menu_depth != 0:\n relative_depth = self.menu_depth + self.root.level\n else:\n relative_depth = 0\n\n return render_to_string('tpl/menu_%s.tpl.html' % menu_as,\n {'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,\n 'current_linkable': current_linkable,\n 'menu_depth': relative_depth, 'class_current': class_current,\n 'view_hidden': self.view_hidden, 'before_1': before_1,\n 'after_1': after_1, 'before_all': before_all, 'after_all': after_all,\n }, context_instance=RequestContext(self.request))\n"
] | class Menu(object):
"""
This class provides some tools to create and render tree structure menu in according to nodes' structure
created for your application.
It takes some arguments:
- request: simply http request
- root: the root of menu (it's hidden in menu string)
- upy_context: it contains informations about current page and node
- menu_depth: it's depth level for menu introspection
- view_hidden: if True then hidden nodes will be shown
- g11n_depth: check g11n_depth in contrib.g11n.models documentation
"""
def __init__(self, request, root, upy_context, menu_depth=0, view_hidden=False):
self.request = request
self.upy_context = upy_context
self.root = root
self.menu_depth = menu_depth
self.view_hidden = view_hidden
def __do_menu(self, menu_as, current_linkable=False, class_current="",
chars="", before_1="", after_1="", before_all="",
after_all="", render=True):
nodes = self.root.get_descendants()
list_nodes = prepare_nodes(list(nodes), self.request)
if not render:
return list_nodes
if self.menu_depth != 0:
relative_depth = self.menu_depth + self.root.level
else:
relative_depth = 0
return render_to_string('tpl/menu_%s.tpl.html' % menu_as,
{'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,
'current_linkable': current_linkable,
'menu_depth': relative_depth, 'class_current': class_current,
'view_hidden': self.view_hidden, 'before_1': before_1,
'after_1': after_1, 'before_all': before_all, 'after_all': after_all,
}, context_instance=RequestContext(self.request))
def as_p(self, current_linkable=False, class_current="active_link"):
"""
It returns menu as p
"""
return self.__do_menu("as_p", current_linkable, class_current)
def as_string(self, chars, current_linkable=False, class_current="active_link"):
"""
It returns menu as string
"""
return self.__do_menu("as_string", current_linkable, class_current, chars)
def as_tree(self):
"""
It returns a menu not cached as tree
"""
return self.__do_menu("", render=False)
|
20tab/twentytab-tree | tree/menu.py | Menu.as_string | python | def as_string(self, chars, current_linkable=False, class_current="active_link"):
return self.__do_menu("as_string", current_linkable, class_current, chars) | It returns menu as string | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L82-L86 | [
"def __do_menu(self, menu_as, current_linkable=False, class_current=\"\",\n chars=\"\", before_1=\"\", after_1=\"\", before_all=\"\",\n after_all=\"\", render=True):\n nodes = self.root.get_descendants()\n list_nodes = prepare_nodes(list(nodes), self.request)\n\n if not render:\n return list_nodes\n\n if self.menu_depth != 0:\n relative_depth = self.menu_depth + self.root.level\n else:\n relative_depth = 0\n\n return render_to_string('tpl/menu_%s.tpl.html' % menu_as,\n {'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,\n 'current_linkable': current_linkable,\n 'menu_depth': relative_depth, 'class_current': class_current,\n 'view_hidden': self.view_hidden, 'before_1': before_1,\n 'after_1': after_1, 'before_all': before_all, 'after_all': after_all,\n }, context_instance=RequestContext(self.request))\n"
] | class Menu(object):
"""
This class provides some tools to create and render tree structure menu in according to nodes' structure
created for your application.
It takes some arguments:
- request: simply http request
- root: the root of menu (it's hidden in menu string)
- upy_context: it contains informations about current page and node
- menu_depth: it's depth level for menu introspection
- view_hidden: if True then hidden nodes will be shown
- g11n_depth: check g11n_depth in contrib.g11n.models documentation
"""
def __init__(self, request, root, upy_context, menu_depth=0, view_hidden=False):
self.request = request
self.upy_context = upy_context
self.root = root
self.menu_depth = menu_depth
self.view_hidden = view_hidden
def __do_menu(self, menu_as, current_linkable=False, class_current="",
chars="", before_1="", after_1="", before_all="",
after_all="", render=True):
nodes = self.root.get_descendants()
list_nodes = prepare_nodes(list(nodes), self.request)
if not render:
return list_nodes
if self.menu_depth != 0:
relative_depth = self.menu_depth + self.root.level
else:
relative_depth = 0
return render_to_string('tpl/menu_%s.tpl.html' % menu_as,
{'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,
'current_linkable': current_linkable,
'menu_depth': relative_depth, 'class_current': class_current,
'view_hidden': self.view_hidden, 'before_1': before_1,
'after_1': after_1, 'before_all': before_all, 'after_all': after_all,
}, context_instance=RequestContext(self.request))
def as_ul(self, current_linkable=False, class_current="active_link",
before_1="", after_1="", before_all="", after_all=""):
"""
It returns menu as ul
"""
return self.__do_menu("as_ul", current_linkable, class_current,
before_1=before_1, after_1=after_1, before_all=before_all, after_all=after_all)
def as_p(self, current_linkable=False, class_current="active_link"):
"""
It returns menu as p
"""
return self.__do_menu("as_p", current_linkable, class_current)
def as_tree(self):
"""
It returns a menu not cached as tree
"""
return self.__do_menu("", render=False)
|
20tab/twentytab-tree | tree/menu.py | Breadcrumb.as_ul | python | def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"):
return self.__do_menu("as_ul", show_leaf, current_linkable, class_current) | It returns breadcrumb as ul | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L132-L136 | [
"def __do_menu(self, menu_as, show_leaf, current_linkable, class_current, chars=\"\", render=True):\n nodes = self.leaf.get_ancestors()[1:]\n list_nodes = list(nodes)\n if show_leaf:\n list_nodes.append(self.leaf)\n\n list_nodes = prepare_nodes(list_nodes, self.request)\n\n if not render:\n return list_nodes\n\n menutpl = render_to_string('tpl/breadcrumb_%s.tpl.html' % menu_as,\n {'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,\n 'current_linkable': current_linkable,\n 'class_current': class_current,\n 'view_hidden': self.view_hidden}, context_instance=RequestContext(self.request))\n\n return mark_safe(menutpl)\n"
] | class Breadcrumb(object):
"""
This class provides some tools to create and render tree structure breadcrumb in according to nodes' structure
created for your application.
It takes some arguments:
- request: simply http request
- leaf: the the leaf of breadcrumb (it's hidden in menu string)
- upy_context: it contains informations about current page and node
- view_hidden: if True then hidden nodes will be show
- g11n_depth: check g11n_depth in contrib.g11n.models documentation
"""
def __init__(self, request, leaf, upy_context, view_hidden=False):
self.request = request
self.leaf = leaf
self.upy_context = upy_context
self.view_hidden = view_hidden
def __do_menu(self, menu_as, show_leaf, current_linkable, class_current, chars="", render=True):
nodes = self.leaf.get_ancestors()[1:]
list_nodes = list(nodes)
if show_leaf:
list_nodes.append(self.leaf)
list_nodes = prepare_nodes(list_nodes, self.request)
if not render:
return list_nodes
menutpl = render_to_string('tpl/breadcrumb_%s.tpl.html' % menu_as,
{'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,
'current_linkable': current_linkable,
'class_current': class_current,
'view_hidden': self.view_hidden}, context_instance=RequestContext(self.request))
return mark_safe(menutpl)
def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as p
"""
return self.__do_menu("as_p", show_leaf, current_linkable, class_current)
def as_string(self, chars, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as string
"""
return self.__do_menu("as_string", show_leaf, current_linkable, class_current, chars)
def as_tree(self):
"""
It returns a menu not cached as tree
"""
return self.__do_menu("", render=False) |
20tab/twentytab-tree | tree/menu.py | Breadcrumb.as_p | python | def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"):
return self.__do_menu("as_p", show_leaf, current_linkable, class_current) | It returns breadcrumb as p | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L138-L142 | [
"def __do_menu(self, menu_as, show_leaf, current_linkable, class_current, chars=\"\", render=True):\n nodes = self.leaf.get_ancestors()[1:]\n list_nodes = list(nodes)\n if show_leaf:\n list_nodes.append(self.leaf)\n\n list_nodes = prepare_nodes(list_nodes, self.request)\n\n if not render:\n return list_nodes\n\n menutpl = render_to_string('tpl/breadcrumb_%s.tpl.html' % menu_as,\n {'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,\n 'current_linkable': current_linkable,\n 'class_current': class_current,\n 'view_hidden': self.view_hidden}, context_instance=RequestContext(self.request))\n\n return mark_safe(menutpl)\n"
] | class Breadcrumb(object):
"""
This class provides some tools to create and render tree structure breadcrumb in according to nodes' structure
created for your application.
It takes some arguments:
- request: simply http request
- leaf: the the leaf of breadcrumb (it's hidden in menu string)
- upy_context: it contains informations about current page and node
- view_hidden: if True then hidden nodes will be show
- g11n_depth: check g11n_depth in contrib.g11n.models documentation
"""
def __init__(self, request, leaf, upy_context, view_hidden=False):
self.request = request
self.leaf = leaf
self.upy_context = upy_context
self.view_hidden = view_hidden
def __do_menu(self, menu_as, show_leaf, current_linkable, class_current, chars="", render=True):
nodes = self.leaf.get_ancestors()[1:]
list_nodes = list(nodes)
if show_leaf:
list_nodes.append(self.leaf)
list_nodes = prepare_nodes(list_nodes, self.request)
if not render:
return list_nodes
menutpl = render_to_string('tpl/breadcrumb_%s.tpl.html' % menu_as,
{'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,
'current_linkable': current_linkable,
'class_current': class_current,
'view_hidden': self.view_hidden}, context_instance=RequestContext(self.request))
return mark_safe(menutpl)
def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as ul
"""
return self.__do_menu("as_ul", show_leaf, current_linkable, class_current)
def as_string(self, chars, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as string
"""
return self.__do_menu("as_string", show_leaf, current_linkable, class_current, chars)
def as_tree(self):
"""
It returns a menu not cached as tree
"""
return self.__do_menu("", render=False) |
20tab/twentytab-tree | tree/menu.py | Breadcrumb.as_string | python | def as_string(self, chars, show_leaf=True, current_linkable=False, class_current="active_link"):
return self.__do_menu("as_string", show_leaf, current_linkable, class_current, chars) | It returns breadcrumb as string | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L144-L148 | [
"def __do_menu(self, menu_as, show_leaf, current_linkable, class_current, chars=\"\", render=True):\n nodes = self.leaf.get_ancestors()[1:]\n list_nodes = list(nodes)\n if show_leaf:\n list_nodes.append(self.leaf)\n\n list_nodes = prepare_nodes(list_nodes, self.request)\n\n if not render:\n return list_nodes\n\n menutpl = render_to_string('tpl/breadcrumb_%s.tpl.html' % menu_as,\n {'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,\n 'current_linkable': current_linkable,\n 'class_current': class_current,\n 'view_hidden': self.view_hidden}, context_instance=RequestContext(self.request))\n\n return mark_safe(menutpl)\n"
] | class Breadcrumb(object):
"""
This class provides some tools to create and render tree structure breadcrumb in according to nodes' structure
created for your application.
It takes some arguments:
- request: simply http request
- leaf: the the leaf of breadcrumb (it's hidden in menu string)
- upy_context: it contains informations about current page and node
- view_hidden: if True then hidden nodes will be show
- g11n_depth: check g11n_depth in contrib.g11n.models documentation
"""
def __init__(self, request, leaf, upy_context, view_hidden=False):
self.request = request
self.leaf = leaf
self.upy_context = upy_context
self.view_hidden = view_hidden
def __do_menu(self, menu_as, show_leaf, current_linkable, class_current, chars="", render=True):
nodes = self.leaf.get_ancestors()[1:]
list_nodes = list(nodes)
if show_leaf:
list_nodes.append(self.leaf)
list_nodes = prepare_nodes(list_nodes, self.request)
if not render:
return list_nodes
menutpl = render_to_string('tpl/breadcrumb_%s.tpl.html' % menu_as,
{'NODE': self.upy_context['NODE'], 'nodes': list_nodes, 'chars': chars,
'current_linkable': current_linkable,
'class_current': class_current,
'view_hidden': self.view_hidden}, context_instance=RequestContext(self.request))
return mark_safe(menutpl)
def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as ul
"""
return self.__do_menu("as_ul", show_leaf, current_linkable, class_current)
def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as p
"""
return self.__do_menu("as_p", show_leaf, current_linkable, class_current)
def as_tree(self):
"""
It returns a menu not cached as tree
"""
return self.__do_menu("", render=False) |
20tab/twentytab-tree | tree/utility.py | getUrlList | python | def getUrlList():
"""
IF YOU WANT REBUILD YOUR STRUCTURE UNCOMMENT THE FOLLOWING LINE
"""
#Node.rebuild()
set_to_return = []
set_url = []
roots = Node.objects.filter(parent__isnull=True)
for root in roots:
nodes = root.get_descendants()
for node in nodes:
if node.page:
page = node.page
view = page.view
regex = r'^{0}$'.format(node.get_pattern())
regex_path = '{0}'.format(node.get_pattern())
view = u'{0}.{1}.{2}'.format(view.app_name, view.module_name, view.func_name)
"""
check_static_vars add UPY_CONTEXT to page
"""
page.check_static_vars(node)
app_url = url(regex, view, page.static_vars, page.scheme_name)
set_to_return.append(app_url)
set_url.append(regex_path)
if node.is_index:
regex = r'^$'
regex_path = ''
app_url = url(regex, view, page.static_vars, page.scheme_name)
set_to_return.append(app_url)
set_url.append(regex_path)
    return set_to_return, set_url | This function gets the Page List from the DB and returns the tuple to
use in the urls.py, urlpatterns | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/utility.py#L9-L45 | null | from django.conf.urls import url
from django.conf import settings
from django.template import RequestContext
from django.template.loader import render_to_string
from tree.models import Node
from datetime import date
class UrlSitemap(object):
    """
    A single ``<url>`` entry used when building sitemap.xml.

    Only ``loc`` is mandatory; the optional attributes mirror the
    standard sitemap protocol fields.
    """
    def __init__(self, loc, lastmod=None, changefreq=None, priority=None):
        # Plain value holder: just record the four sitemap attributes.
        self.priority = priority
        self.changefreq = changefreq
        self.lastmod = lastmod
        self.loc = loc
class Sitemap(object):
    """
    Builds the content of sitemap.xml from the node tree.

    Walks every root node's descendants and emits one ``UrlSitemap``
    entry per node that has a page (plus an extra empty-``loc`` entry
    for the index node), then renders them via ``tpl/sitemap.tpl.html``.
    """
    def __init__(self, request):
        # Needed below for the host name and the template RequestContext.
        self.request = request
    def _do_sitemap(self):
        """Return the rendered sitemap.xml body as a string."""
        host = self.request.get_host()
        set_to_return = []
        for root in Node.objects.filter(parent__isnull=True):
            for node in root.get_descendants():
                if node.page:
                    # The node slug is used as the (relative) <loc> path;
                    # presumably the template prefixes it with ``host``.
                    regex = r'%s' % node.slug
                    url_sitemap = UrlSitemap(loc=regex)
                    if node.changefreq:
                        url_sitemap.changefreq = node.changefreq
                    if node.priority:
                        url_sitemap.priority = node.priority
                    set_to_return.append(url_sitemap)
                    if node.is_index:
                        # The index page is also reachable at the site
                        # root, so add a second entry with an empty loc.
                        regex = r''
                        url_sitemap = UrlSitemap(loc=regex)
                        set_to_return.append(url_sitemap)
        tpl_str = render_to_string('tpl/sitemap.tpl.html',
                                   {'set': set_to_return, 'host': host, 'today': date.today(), },
                                   context_instance=RequestContext(self.request))
        return tpl_str
class RobotTXT(object):
    """
    Builds the content of robots.txt from the node tree.

    Every node that has a page and is flagged ``disallow`` contributes
    its slug to the pattern list of each robot attached to it; the
    collected mapping is rendered by ``tpl/robots.tpl.html``
    (presumably as per-robot Disallow lines). When the global
    ``DISALLOW_ALL_ROBOTS`` setting is on, the tree walk is skipped and
    only the flag is passed to the template.
    """
    def __init__(self, request):
        # Needed below for the template RequestContext.
        self.request = request
    def _do_robotstxt(self):
        """Return the rendered robots.txt body as a string."""
        # Maps robot name_id -> list of disallowed url patterns.
        set_robot = {}
        disallow_all = settings.DISALLOW_ALL_ROBOTS
        if not disallow_all:
            for root in Node.objects.filter(parent__isnull=True):
                for node in root.get_descendants():
                    if node.page and node.disallow:
                        regex = r'{0}'.format(node.slug)
                        for robot in node.robots.all():
                            # setdefault replaces the original membership
                            # test plus two assignment branches; behavior
                            # is identical.
                            set_robot.setdefault(robot.name_id, []).append(regex)
        tpl_str = render_to_string('tpl/robots.tpl.html',
                                   {'set': set_robot, 'disallow_all': disallow_all},
                                   context_instance=RequestContext(self.request))
        return tpl_str
20tab/twentytab-tree | tree/models.py | list_apps | python | def list_apps():
return [(d.split('.')[-1], d.split('.')[-1]) for d in os.listdir(
os.getcwd()) if is_app(u"{}/{}".format(os.getcwd(), d))] | It returns a list of application contained in PROJECT_APPS | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L291-L296 | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey, TreeManager
from django.conf import settings
from django.contrib.auth.models import Group
from ast import literal_eval
import os
from twentytab.fields import NullTrueField
from . import conf
class NodeManager(TreeManager):
def get_queryset(self):
return super(NodeManager, self).get_query_set().select_related(
'parent','page').prefetch_related('robots', 'groups')
get_query_set = get_queryset
class Node(MPTTModel):
"""
This is the class that defines tree's nodes.
"""
_default_manager = NodeManager()
name = models.CharField(max_length=50, help_text=_(u"Identifying name of the associated page."),
verbose_name=_(u"Name"))
page = models.ForeignKey(u"Page", null=True, blank=True, help_text=_(u"Set the page for the referenced node."),
verbose_name=_(u"Page"))
parent = TreeForeignKey('self', null=True, blank=True, related_name='children',
help_text=_(u"Set the parent node for this node if it isn't root."),
verbose_name=_(u"Parent"))
position = models.PositiveSmallIntegerField(u'Position', default=0)
is_index = NullTrueField(_('Is index node?'), unique=True)
hide_in_navigation = models.BooleanField(help_text=_(u"Check it to hide the page in this node in the navigation."),
verbose_name=_(u"Hide in navigation"), default=False)
hide_in_url = models.BooleanField(
_(u'Hide in url'), default=False,
help_text=_(u"Check it to hide the node in url path (only if node hasn't a page)."))
show_if_logged = models.BooleanField(
help_text=_(u"Check it if this node must be showed only for logged user or group."),
verbose_name=_(u"Show if logged"), default=False
)
groups = models.ManyToManyField(Group, null=True, blank=True,
help_text=_(u"List of groups to use with 'show if logged' parameter."),
verbose_name=_(u"Groups"), related_name='node_groups')
value_regex = models.CharField(max_length=50, null=True, blank=True,
help_text=_(u"Set the value to respect the regex of the associated page."),
verbose_name=_(u"Value regex"))
changefreq = models.CharField(max_length=50, null=True, blank=True, choices=(("always", "always"),
("hourly", "hourly"),
("daily", "daily"),
("weekly", "weekly"),
("monthly", "monthly"),
("yearly", "yearly"),
("never", "never")),
help_text=_(u"The chengefreq attribute for sitemap.xml"),
verbose_name=_(u"Changefreq"))
priority = models.CharField(max_length=50, choices=(("0.1", "0.1"),
("0.2", "0.2"),
("0.3", "0.3"),
("0.4", "0.4"),
("0.5", "0.5"),
("0.6", "0.6"),
("0.7", "0.7"),
("0.8", "0.8"),
("0.9", "0.9"),
("1.0", "1.0"),),
default="0.5",
help_text=_(u"The priority attribute for sitemap.xml"),
verbose_name=_(u"Priority"))
robots = models.ManyToManyField(u"Robot", null=True, blank=True,
help_text=_(u"List of robots to communicate that this node is disallowed."),
verbose_name=_(u"Robots"))
disallow = models.BooleanField(help_text=_(u"Check it to disallow the page in this node in the file robots.txt."),
verbose_name=_(u"Disallow"), default=False)
@property
def page_name(self):
"""
It returns page's name of this node
"""
if self.page:
return self.page.name
return ""
@property
def view_path(self):
"""
It returns page's view_path
"""
if self.page:
return self.page.view_path
return ""
@property
def slug(self):
"""
It returns node's slug
"""
if self.is_root_node():
return ""
if self.slugable and self.parent.parent:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}/{1}".format(self.parent.slug, self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}/{2}'.format(self.parent.slug, self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}/{1}'.format(self.parent.slug, self.name)
elif self.slugable:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}".format(self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}'.format(self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}'.format(self.name)
return ""
    @property
    def slugable(self):
        """
        Whether this node can contribute a slug segment to the url.

        A node is slugable in the following cases:
        1 - Node doesn't have children.
        2 - Node has children but its page doesn't have a regex.
        3 - Node has children, its page has a regex but doesn't show it.
        4 - Node has children, its page shows its regex and the node has a default value for the regex.
        5 - Node hasn't a page but it isn't hidden in the url.
        """
        if self.page:
            if self.is_leaf_node():
                return True  # case 1
            if not self.is_leaf_node() and not self.page.regex:
                return True  # case 2
            if not self.is_leaf_node() and self.page.regex and not self.page.show_regex:
                return True  # case 3
            if not self.is_leaf_node() and self.page.regex and self.page.show_regex and self.value_regex:
                return True  # case 4
        elif not self.is_leaf_node() and not self.hide_in_url:
            # case 5 -- NOTE(review): unlike the docstring, the code also
            # requires the node to have children here.
            return True
        return False
    def get_pattern(self):
        """
        Return this node's url pattern fragment.

        Built recursively: each call prefixes the parent's pattern with
        the segment contributed by this node (page slug, optional regex,
        or plain node name for page-less structural nodes). Callers wrap
        the result with ``^...$`` anchors (see ``getUrlList``).
        """
        if self.is_root_node():
            # The tree root contributes nothing to the url.
            return ""
        else:
            parent_pattern = self.parent.get_pattern()
            if parent_pattern != "":
                parent_pattern = u"{}".format(parent_pattern)
            if not self.page and not self.is_leaf_node():
                # Structural node without a page: its name becomes a plain
                # path segment, unless it is hidden from the url.
                if self.hide_in_url:
                    return u'{0}'.format(parent_pattern)
                else:
                    return u'{0}{1}'.format(parent_pattern, self.name)
            else:
                if self.is_leaf_node() and self.page.regex and self.page.show_regex:
                    # Leaf with a visible regex: slug then the raw regex.
                    return u'{0}{1}/{2}'.format(parent_pattern, self.page.slug, self.page.regex)
                elif self.is_leaf_node() and (not self.page.regex or not self.page.show_regex):
                    return u'{0}{1}/'.format(parent_pattern, self.page.slug)
                elif not self.is_leaf_node() and self.page.regex and self.page.show_regex:
                    # Inner node with a visible regex keeps a trailing slash
                    # so descendants can append their own segments.
                    return u'{0}{1}/{2}/'.format(parent_pattern, self.page.slug, self.page.regex)
                else:
                    return u'{0}{1}/'.format(parent_pattern, self.page.slug)
def get_absolute_url(self):
"""
It returns simply a link as string
"""
return u"{0}".format(self.slug)
@property
def presentation_type(self):
"""
It returns page's presentation_type
"""
if self.page and self.page.presentation_type:
return self.page.presentation_type
return ""
def __unicode__(self):
if self.page_name is None:
page_name = "-"
else:
page_name = self.page_name
return u"{0} ({1})".format(self.name, page_name)
class Meta:
verbose_name = _(u"Node")
verbose_name_plural = _(u"Nodes")
ordering = ['tree_id', 'lft']
class PageManager(models.Manager):
def get_queryset(self):
return super(PageManager, self).get_query_set().select_related('template','view')
get_query_set = get_queryset
class Page(models.Model):
"""
This is the class that defines a page of the structure.
"""
#objects = PageManager()
name = models.CharField(max_length=50, unique=True, help_text=_(u"Identifying page's name."),
verbose_name=_(u"Name"))
slug = models.SlugField(max_length=50, help_text=_(u"Identifying page's url."),
verbose_name=_(u"Slug"))
regex = models.CharField(
max_length=150, null=True, blank=True,
help_text=_(u"Set the regular expression that completes the url (e.g. \"(?P<element_id>\d+)\")."),
verbose_name=_(u"Regex")
)
show_regex = models.BooleanField(_(u'Show regex'), default=True,
help_text=_(u'If it\'s checked the regex will be shown in urlpattern'))
static_vars = models.TextField(
null=True, blank=True,
help_text=_(
u"""Set the dictionary of static parameters of the page in a regular format:
{\"param1\":value1, \"param2\":value2}."""),
verbose_name=_(u"Static vars")
)
scheme_name = models.CharField(max_length=100, null=True, blank=True,
help_text=_(u"Set the unique name to associate the view of a callback url."),
verbose_name=_(u"Scheme name"))
template = models.ForeignKey(u"Template", help_text=_(u"Set the template to associate with the page."),
verbose_name=_(u"Template"))
view = models.ForeignKey(u"View", help_text=_(u"Set the view to associate with the page."),
verbose_name=_(u"View"))
presentation_type = models.CharField(max_length=150, null=True, blank=True,
choices=(("StaticPage", "StaticPage"), ("Custom", "Custom"),),
help_text=_(u"Select the presentation type."),
verbose_name=_(u"Presentation type"))
@property
def view_path(self):
"""
It returns view's view path
"""
if self.scheme_name is None or self.scheme_name == "":
return self.view.view_path
else:
return self.scheme_name
def get_absolute_url(self):
"""
It returns absolute url defined by node related to this page
"""
try:
node = Node.objects.select_related().filter(page=self)[0]
return node.get_absolute_url()
except Exception, e:
raise ValueError(u"Error in {0}.{1}: {2}".format(self.__module__, self.__class__.__name__, e))
return u""
def check_static_vars(self, node):
"""
This function check if a Page has static vars
"""
if self.static_vars == "" and hasattr(self, "template"):
self.static_vars = {
'upy_context': {
'template_name': u"{}/{}".format(self.template.app_name, self.template.file_name)
}
}
elif hasattr(self, "template"):
self.static_vars = literal_eval(self.static_vars)
self.static_vars['upy_context']['template_name'] = u"{}/{}".format(
self.template.app_name, self.template.file_name
)
self.static_vars['upy_context']['NODE'] = node
self.static_vars['upy_context']['PAGE'] = self
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u"Page")
verbose_name_plural = _(u"Pages")
ordering = ['name']
def is_app(path):
    """
    Return True if *path* looks like a Django application package.

    A directory qualifies when it contains an ``__init__.py`` (so it is
    a Python package) but no ``settings.py`` (so it is not the project
    settings package itself).
    """
    if not os.path.isdir(path):
        return False
    # List the directory once; the original called os.listdir twice.
    entries = os.listdir(path)
    return '__init__.py' in entries and 'settings.py' not in entries
class Template(models.Model):
"""
This is the class that defines a template
"""
name = models.CharField(max_length=100, help_text=_(u"Set the template's name."), verbose_name=_(u"Name"))
app_name = models.CharField(max_length=100, help_text=_(u"Set the application's name of the view."),
choices=list_apps(),
verbose_name=_(u"App name"))
file_name = models.CharField(max_length=150, help_text=_(u"Set the template's file name."),
verbose_name=_(u"File name"))
input_vars = models.TextField(null=True, blank=True,
help_text=_(u"Set the variables required by template (separated with ,)."),
verbose_name=_(u"Input vars"))
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
tmpl_path = u'%s/templates/' % self.app_name
if settings.USE_GLOBAL_TEMPLATES_DIR:
tmpl_path = u'%s/templates/%s/' % (os.getcwd(), self.app_name)
if not os.path.exists(tmpl_path):
os.makedirs(tmpl_path)
index_tpl_name = u"%sindex.html" % tmpl_path
file_tpl_name = u'%s%s' % (tmpl_path, self.file_name)
if not os.path.exists(index_tpl_name): #non sovrascrivo l'index se gia esiste
file_tpl = open(index_tpl_name, "w")
str_to_write = u"{% extends \"tree_base.html\" %}\n{% load i18n %}\n\n{% block body %}\n\n{% block main_content %}\n"
for var in self.input_vars.split(","):
if var != "":
str_to_write += u"%s: {{%s}}<br/>\n" % (var, var)
str_to_write += u"{% endblock main_content %}\n\n{% endblock body %}"
file_tpl.write(str_to_write)
file_tpl.close()
if not os.path.exists(file_tpl_name): #non sovrascrivo il file se gia esiste
file_tpl = open(file_tpl_name, "w")
str_to_write = u"{% extends \"index.html\" %}\n{% load i18n %}\n\n{% block main_content %}\n"
for var in self.input_vars.split(","):
if var != "":
str_to_write += u"%s: {{%s}}<br/>\n" % (var, var)
str_to_write += u"{% endblock main_content %}"
file_tpl.write(str_to_write)
file_tpl.close()
super(Template, self).save(*args, **kwargs)
class Meta:
verbose_name = _(u"Template")
verbose_name_plural = _(u"Templates")
ordering = ['name']
class View(models.Model):
"""
It defines view object and it's used to write view definition in views.py module
"""
name = models.CharField(max_length=100, help_text=_(u"Set the view's name."), verbose_name=_(u"Name"))
app_name = models.CharField(max_length=100, help_text=_(u"Set the application's name of the view."),
choices=list_apps(),
verbose_name=_(u"App name"))
func_name = models.CharField(max_length=100, help_text=_(u"Set the view's function name."),
verbose_name=_(u"Func name"))
input_vars = models.TextField(null=True, blank=True, help_text=_(u"Set the input variables required by view."),
verbose_name=_(u"Input vars"))
output_vars = models.TextField(null=True, blank=True,
help_text=_(u"Set the json list of output variables required by template."),
verbose_name=_(u"Output vars"))
module_name = models.CharField(max_length=100, default=u"views", help_text=_(u"Set the module's name of the view."),
verbose_name=_(u"Module name"))
@property
def view_path(self):
"""
It returns view_path as string like: 'app_name.module_mane.func_name'
"""
return u"{0}.{1}.{2}".format(self.app_name, self.module_name, self.func_name)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not os.path.exists(u'{0}/'.format(self.app_name)):
os.makedirs(u'{0}/'.format(self.app_name))
file_view_name = u'{0}/{1}.py'.format(self.app_name, self.module_name)
found = False
if os.path.exists(file_view_name):
file_view = open(file_view_name, "r")
for l in file_view.readlines():
if l[:3] == "def":
cont = len(self.func_name)
if l[4:cont + 4] == self.func_name:
found = True
file_view.close()
if not found:
file_view = open(file_view_name, "a")
upy_context_string = ", upy_context"
str_to_write = u"\n\ndef %s(request%s" % (self.func_name, upy_context_string)
if self.input_vars != "" and self.input_vars:
if self.input_vars[0:1] == ",":
self.input_vars = self.input_vars[1:]
if self.input_vars[-1] == ",":
self.input_vars = self.input_vars[:-1]
str_to_write += ", %s" % self.input_vars
str_to_write += "):\n"
if self.output_vars != "" and self.output_vars:
outputvars = self.output_vars[1:-1]
for item in outputvars.split(','):
obj_tuple = item.split(':')
str_to_write += " {0} = \"{1} to initialize\"\n".format(obj_tuple[1], obj_tuple[1])
str_to_write += " return main_render(request{0}".format(upy_context_string)
if self.output_vars != "" and self.output_vars:
str_to_write += ", {0}".format(self.output_vars)
else:
str_to_write += ", {}"
str_to_write += ")\n"
file_view.write(str_to_write)
file_view.close()
super(View, self).save(*args, **kwargs)
class Meta:
verbose_name = _(u"View")
verbose_name_plural = _(u"Views")
ordering = ['name']
class Robot(models.Model):
"""
It defines robots definition for search engines
"""
name_id = models.CharField(
max_length=250,
help_text=_(
u"""Short name for the robot. Check the robots' list at
<a target='_blank' href='http://www.robotstxt.org/db.html'>All Robots</a>"""),
verbose_name=_(u"Name id")
)
name = models.CharField(max_length=250, help_text=_(u"Full name for the robot"),
verbose_name=_(u"Name"))
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u"Robot")
verbose_name_plural = _(u"Robots")
ordering = ['name'] |
20tab/twentytab-tree | tree/models.py | Node.slug | python | def slug(self):
if self.is_root_node():
return ""
if self.slugable and self.parent.parent:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}/{1}".format(self.parent.slug, self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}/{2}'.format(self.parent.slug, self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}/{1}'.format(self.parent.slug, self.name)
elif self.slugable:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}".format(self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}'.format(self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}'.format(self.name)
return "" | It returns node's slug | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L97-L117 | null | class Node(MPTTModel):
"""
This is the class that defines tree's nodes.
"""
_default_manager = NodeManager()
name = models.CharField(max_length=50, help_text=_(u"Identifying name of the associated page."),
verbose_name=_(u"Name"))
page = models.ForeignKey(u"Page", null=True, blank=True, help_text=_(u"Set the page for the referenced node."),
verbose_name=_(u"Page"))
parent = TreeForeignKey('self', null=True, blank=True, related_name='children',
help_text=_(u"Set the parent node for this node if it isn't root."),
verbose_name=_(u"Parent"))
position = models.PositiveSmallIntegerField(u'Position', default=0)
is_index = NullTrueField(_('Is index node?'), unique=True)
hide_in_navigation = models.BooleanField(help_text=_(u"Check it to hide the page in this node in the navigation."),
verbose_name=_(u"Hide in navigation"), default=False)
hide_in_url = models.BooleanField(
_(u'Hide in url'), default=False,
help_text=_(u"Check it to hide the node in url path (only if node hasn't a page)."))
show_if_logged = models.BooleanField(
help_text=_(u"Check it if this node must be showed only for logged user or group."),
verbose_name=_(u"Show if logged"), default=False
)
groups = models.ManyToManyField(Group, null=True, blank=True,
help_text=_(u"List of groups to use with 'show if logged' parameter."),
verbose_name=_(u"Groups"), related_name='node_groups')
value_regex = models.CharField(max_length=50, null=True, blank=True,
help_text=_(u"Set the value to respect the regex of the associated page."),
verbose_name=_(u"Value regex"))
changefreq = models.CharField(max_length=50, null=True, blank=True, choices=(("always", "always"),
("hourly", "hourly"),
("daily", "daily"),
("weekly", "weekly"),
("monthly", "monthly"),
("yearly", "yearly"),
("never", "never")),
help_text=_(u"The chengefreq attribute for sitemap.xml"),
verbose_name=_(u"Changefreq"))
priority = models.CharField(max_length=50, choices=(("0.1", "0.1"),
("0.2", "0.2"),
("0.3", "0.3"),
("0.4", "0.4"),
("0.5", "0.5"),
("0.6", "0.6"),
("0.7", "0.7"),
("0.8", "0.8"),
("0.9", "0.9"),
("1.0", "1.0"),),
default="0.5",
help_text=_(u"The priority attribute for sitemap.xml"),
verbose_name=_(u"Priority"))
robots = models.ManyToManyField(u"Robot", null=True, blank=True,
help_text=_(u"List of robots to communicate that this node is disallowed."),
verbose_name=_(u"Robots"))
disallow = models.BooleanField(help_text=_(u"Check it to disallow the page in this node in the file robots.txt."),
verbose_name=_(u"Disallow"), default=False)
@property
def page_name(self):
"""
It returns page's name of this node
"""
if self.page:
return self.page.name
return ""
@property
def view_path(self):
"""
It returns page's view_path
"""
if self.page:
return self.page.view_path
return ""
@property
@property
def slugable(self):
"""
A node is slugable in following cases:
1 - Node doesn't have children.
2 - Node has children but its page doesn't have a regex.
3 - Node has children, its page has regex but it doesn't show it.
4 - Node has children, its page shows his regex and node has a default value for regex.
5 - Node hasn't a page but it ins't hidden in url.
"""
if self.page:
if self.is_leaf_node():
return True
if not self.is_leaf_node() and not self.page.regex:
return True
if not self.is_leaf_node() and self.page.regex and not self.page.show_regex:
return True
if not self.is_leaf_node() and self.page.regex and self.page.show_regex and self.value_regex:
return True
elif not self.is_leaf_node() and not self.hide_in_url:
return True
return False
def get_pattern(self):
"""
It returns its url pattern
"""
if self.is_root_node():
return ""
else:
parent_pattern = self.parent.get_pattern()
if parent_pattern != "":
parent_pattern = u"{}".format(parent_pattern)
if not self.page and not self.is_leaf_node():
if self.hide_in_url:
return u'{0}'.format(parent_pattern)
else:
return u'{0}{1}'.format(parent_pattern, self.name)
else:
if self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}'.format(parent_pattern, self.page.slug, self.page.regex)
elif self.is_leaf_node() and (not self.page.regex or not self.page.show_regex):
return u'{0}{1}/'.format(parent_pattern, self.page.slug)
elif not self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}/'.format(parent_pattern, self.page.slug, self.page.regex)
else:
return u'{0}{1}/'.format(parent_pattern, self.page.slug)
def get_absolute_url(self):
"""
It returns simply a link as string
"""
return u"{0}".format(self.slug)
@property
def presentation_type(self):
"""
It returns page's presentation_type
"""
if self.page and self.page.presentation_type:
return self.page.presentation_type
return ""
def __unicode__(self):
if self.page_name is None:
page_name = "-"
else:
page_name = self.page_name
return u"{0} ({1})".format(self.name, page_name)
class Meta:
verbose_name = _(u"Node")
verbose_name_plural = _(u"Nodes")
ordering = ['tree_id', 'lft']
|
20tab/twentytab-tree | tree/models.py | Node.slugable | python | def slugable(self):
if self.page:
if self.is_leaf_node():
return True
if not self.is_leaf_node() and not self.page.regex:
return True
if not self.is_leaf_node() and self.page.regex and not self.page.show_regex:
return True
if not self.is_leaf_node() and self.page.regex and self.page.show_regex and self.value_regex:
return True
elif not self.is_leaf_node() and not self.hide_in_url:
return True
return False | A node is slugable in following cases:
1 - Node doesn't have children.
2 - Node has children but its page doesn't have a regex.
3 - Node has children, its page has regex but it doesn't show it.
4 - Node has children, its page shows his regex and node has a default value for regex.
5 - Node hasn't a page but it isn't hidden in url. | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L120-L140 | null | class Node(MPTTModel):
"""
This is the class that defines tree's nodes.
"""
_default_manager = NodeManager()
name = models.CharField(max_length=50, help_text=_(u"Identifying name of the associated page."),
verbose_name=_(u"Name"))
page = models.ForeignKey(u"Page", null=True, blank=True, help_text=_(u"Set the page for the referenced node."),
verbose_name=_(u"Page"))
parent = TreeForeignKey('self', null=True, blank=True, related_name='children',
help_text=_(u"Set the parent node for this node if it isn't root."),
verbose_name=_(u"Parent"))
position = models.PositiveSmallIntegerField(u'Position', default=0)
is_index = NullTrueField(_('Is index node?'), unique=True)
hide_in_navigation = models.BooleanField(help_text=_(u"Check it to hide the page in this node in the navigation."),
verbose_name=_(u"Hide in navigation"), default=False)
hide_in_url = models.BooleanField(
_(u'Hide in url'), default=False,
help_text=_(u"Check it to hide the node in url path (only if node hasn't a page)."))
show_if_logged = models.BooleanField(
help_text=_(u"Check it if this node must be showed only for logged user or group."),
verbose_name=_(u"Show if logged"), default=False
)
groups = models.ManyToManyField(Group, null=True, blank=True,
help_text=_(u"List of groups to use with 'show if logged' parameter."),
verbose_name=_(u"Groups"), related_name='node_groups')
value_regex = models.CharField(max_length=50, null=True, blank=True,
help_text=_(u"Set the value to respect the regex of the associated page."),
verbose_name=_(u"Value regex"))
changefreq = models.CharField(max_length=50, null=True, blank=True, choices=(("always", "always"),
("hourly", "hourly"),
("daily", "daily"),
("weekly", "weekly"),
("monthly", "monthly"),
("yearly", "yearly"),
("never", "never")),
help_text=_(u"The chengefreq attribute for sitemap.xml"),
verbose_name=_(u"Changefreq"))
priority = models.CharField(max_length=50, choices=(("0.1", "0.1"),
("0.2", "0.2"),
("0.3", "0.3"),
("0.4", "0.4"),
("0.5", "0.5"),
("0.6", "0.6"),
("0.7", "0.7"),
("0.8", "0.8"),
("0.9", "0.9"),
("1.0", "1.0"),),
default="0.5",
help_text=_(u"The priority attribute for sitemap.xml"),
verbose_name=_(u"Priority"))
robots = models.ManyToManyField(u"Robot", null=True, blank=True,
help_text=_(u"List of robots to communicate that this node is disallowed."),
verbose_name=_(u"Robots"))
disallow = models.BooleanField(help_text=_(u"Check it to disallow the page in this node in the file robots.txt."),
verbose_name=_(u"Disallow"), default=False)
@property
def page_name(self):
"""
It returns page's name of this node
"""
if self.page:
return self.page.name
return ""
@property
def view_path(self):
"""
It returns page's view_path
"""
if self.page:
return self.page.view_path
return ""
@property
def slug(self):
"""
It returns node's slug
"""
if self.is_root_node():
return ""
if self.slugable and self.parent.parent:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}/{1}".format(self.parent.slug, self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}/{2}'.format(self.parent.slug, self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}/{1}'.format(self.parent.slug, self.name)
elif self.slugable:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}".format(self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}'.format(self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}'.format(self.name)
return ""
@property
def get_pattern(self):
"""
It returns its url pattern
"""
if self.is_root_node():
return ""
else:
parent_pattern = self.parent.get_pattern()
if parent_pattern != "":
parent_pattern = u"{}".format(parent_pattern)
if not self.page and not self.is_leaf_node():
if self.hide_in_url:
return u'{0}'.format(parent_pattern)
else:
return u'{0}{1}'.format(parent_pattern, self.name)
else:
if self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}'.format(parent_pattern, self.page.slug, self.page.regex)
elif self.is_leaf_node() and (not self.page.regex or not self.page.show_regex):
return u'{0}{1}/'.format(parent_pattern, self.page.slug)
elif not self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}/'.format(parent_pattern, self.page.slug, self.page.regex)
else:
return u'{0}{1}/'.format(parent_pattern, self.page.slug)
def get_absolute_url(self):
"""
It returns simply a link as string
"""
return u"{0}".format(self.slug)
@property
def presentation_type(self):
"""
It returns page's presentation_type
"""
if self.page and self.page.presentation_type:
return self.page.presentation_type
return ""
def __unicode__(self):
if self.page_name is None:
page_name = "-"
else:
page_name = self.page_name
return u"{0} ({1})".format(self.name, page_name)
class Meta:
verbose_name = _(u"Node")
verbose_name_plural = _(u"Nodes")
ordering = ['tree_id', 'lft']
|
20tab/twentytab-tree | tree/models.py | Node.get_pattern | python | def get_pattern(self):
if self.is_root_node():
return ""
else:
parent_pattern = self.parent.get_pattern()
if parent_pattern != "":
parent_pattern = u"{}".format(parent_pattern)
if not self.page and not self.is_leaf_node():
if self.hide_in_url:
return u'{0}'.format(parent_pattern)
else:
return u'{0}{1}'.format(parent_pattern, self.name)
else:
if self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}'.format(parent_pattern, self.page.slug, self.page.regex)
elif self.is_leaf_node() and (not self.page.regex or not self.page.show_regex):
return u'{0}{1}/'.format(parent_pattern, self.page.slug)
elif not self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}/'.format(parent_pattern, self.page.slug, self.page.regex)
else:
return u'{0}{1}/'.format(parent_pattern, self.page.slug) | It returns its url pattern | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L142-L165 | null | class Node(MPTTModel):
"""
This is the class that defines tree's nodes.
"""
_default_manager = NodeManager()
name = models.CharField(max_length=50, help_text=_(u"Identifying name of the associated page."),
verbose_name=_(u"Name"))
page = models.ForeignKey(u"Page", null=True, blank=True, help_text=_(u"Set the page for the referenced node."),
verbose_name=_(u"Page"))
parent = TreeForeignKey('self', null=True, blank=True, related_name='children',
help_text=_(u"Set the parent node for this node if it isn't root."),
verbose_name=_(u"Parent"))
position = models.PositiveSmallIntegerField(u'Position', default=0)
is_index = NullTrueField(_('Is index node?'), unique=True)
hide_in_navigation = models.BooleanField(help_text=_(u"Check it to hide the page in this node in the navigation."),
verbose_name=_(u"Hide in navigation"), default=False)
hide_in_url = models.BooleanField(
_(u'Hide in url'), default=False,
help_text=_(u"Check it to hide the node in url path (only if node hasn't a page)."))
show_if_logged = models.BooleanField(
help_text=_(u"Check it if this node must be showed only for logged user or group."),
verbose_name=_(u"Show if logged"), default=False
)
groups = models.ManyToManyField(Group, null=True, blank=True,
help_text=_(u"List of groups to use with 'show if logged' parameter."),
verbose_name=_(u"Groups"), related_name='node_groups')
value_regex = models.CharField(max_length=50, null=True, blank=True,
help_text=_(u"Set the value to respect the regex of the associated page."),
verbose_name=_(u"Value regex"))
changefreq = models.CharField(max_length=50, null=True, blank=True, choices=(("always", "always"),
("hourly", "hourly"),
("daily", "daily"),
("weekly", "weekly"),
("monthly", "monthly"),
("yearly", "yearly"),
("never", "never")),
help_text=_(u"The chengefreq attribute for sitemap.xml"),
verbose_name=_(u"Changefreq"))
priority = models.CharField(max_length=50, choices=(("0.1", "0.1"),
("0.2", "0.2"),
("0.3", "0.3"),
("0.4", "0.4"),
("0.5", "0.5"),
("0.6", "0.6"),
("0.7", "0.7"),
("0.8", "0.8"),
("0.9", "0.9"),
("1.0", "1.0"),),
default="0.5",
help_text=_(u"The priority attribute for sitemap.xml"),
verbose_name=_(u"Priority"))
robots = models.ManyToManyField(u"Robot", null=True, blank=True,
help_text=_(u"List of robots to communicate that this node is disallowed."),
verbose_name=_(u"Robots"))
disallow = models.BooleanField(help_text=_(u"Check it to disallow the page in this node in the file robots.txt."),
verbose_name=_(u"Disallow"), default=False)
@property
def page_name(self):
"""
It returns page's name of this node
"""
if self.page:
return self.page.name
return ""
@property
def view_path(self):
"""
It returns page's view_path
"""
if self.page:
return self.page.view_path
return ""
@property
def slug(self):
"""
It returns node's slug
"""
if self.is_root_node():
return ""
if self.slugable and self.parent.parent:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}/{1}".format(self.parent.slug, self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}/{2}'.format(self.parent.slug, self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}/{1}'.format(self.parent.slug, self.name)
elif self.slugable:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}".format(self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}'.format(self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}'.format(self.name)
return ""
@property
def slugable(self):
"""
A node is slugable in following cases:
1 - Node doesn't have children.
2 - Node has children but its page doesn't have a regex.
3 - Node has children, its page has regex but it doesn't show it.
4 - Node has children, its page shows his regex and node has a default value for regex.
5 - Node hasn't a page but it ins't hidden in url.
"""
if self.page:
if self.is_leaf_node():
return True
if not self.is_leaf_node() and not self.page.regex:
return True
if not self.is_leaf_node() and self.page.regex and not self.page.show_regex:
return True
if not self.is_leaf_node() and self.page.regex and self.page.show_regex and self.value_regex:
return True
elif not self.is_leaf_node() and not self.hide_in_url:
return True
return False
def get_absolute_url(self):
"""
It returns simply a link as string
"""
return u"{0}".format(self.slug)
@property
def presentation_type(self):
"""
It returns page's presentation_type
"""
if self.page and self.page.presentation_type:
return self.page.presentation_type
return ""
def __unicode__(self):
if self.page_name is None:
page_name = "-"
else:
page_name = self.page_name
return u"{0} ({1})".format(self.name, page_name)
class Meta:
verbose_name = _(u"Node")
verbose_name_plural = _(u"Nodes")
ordering = ['tree_id', 'lft']
|
20tab/twentytab-tree | tree/models.py | Node.presentation_type | python | def presentation_type(self):
if self.page and self.page.presentation_type:
return self.page.presentation_type
return "" | It returns page's presentation_type | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L174-L180 | null | class Node(MPTTModel):
"""
This is the class that defines tree's nodes.
"""
_default_manager = NodeManager()
name = models.CharField(max_length=50, help_text=_(u"Identifying name of the associated page."),
verbose_name=_(u"Name"))
page = models.ForeignKey(u"Page", null=True, blank=True, help_text=_(u"Set the page for the referenced node."),
verbose_name=_(u"Page"))
parent = TreeForeignKey('self', null=True, blank=True, related_name='children',
help_text=_(u"Set the parent node for this node if it isn't root."),
verbose_name=_(u"Parent"))
position = models.PositiveSmallIntegerField(u'Position', default=0)
is_index = NullTrueField(_('Is index node?'), unique=True)
hide_in_navigation = models.BooleanField(help_text=_(u"Check it to hide the page in this node in the navigation."),
verbose_name=_(u"Hide in navigation"), default=False)
hide_in_url = models.BooleanField(
_(u'Hide in url'), default=False,
help_text=_(u"Check it to hide the node in url path (only if node hasn't a page)."))
show_if_logged = models.BooleanField(
help_text=_(u"Check it if this node must be showed only for logged user or group."),
verbose_name=_(u"Show if logged"), default=False
)
groups = models.ManyToManyField(Group, null=True, blank=True,
help_text=_(u"List of groups to use with 'show if logged' parameter."),
verbose_name=_(u"Groups"), related_name='node_groups')
value_regex = models.CharField(max_length=50, null=True, blank=True,
help_text=_(u"Set the value to respect the regex of the associated page."),
verbose_name=_(u"Value regex"))
changefreq = models.CharField(max_length=50, null=True, blank=True, choices=(("always", "always"),
("hourly", "hourly"),
("daily", "daily"),
("weekly", "weekly"),
("monthly", "monthly"),
("yearly", "yearly"),
("never", "never")),
help_text=_(u"The chengefreq attribute for sitemap.xml"),
verbose_name=_(u"Changefreq"))
priority = models.CharField(max_length=50, choices=(("0.1", "0.1"),
("0.2", "0.2"),
("0.3", "0.3"),
("0.4", "0.4"),
("0.5", "0.5"),
("0.6", "0.6"),
("0.7", "0.7"),
("0.8", "0.8"),
("0.9", "0.9"),
("1.0", "1.0"),),
default="0.5",
help_text=_(u"The priority attribute for sitemap.xml"),
verbose_name=_(u"Priority"))
robots = models.ManyToManyField(u"Robot", null=True, blank=True,
help_text=_(u"List of robots to communicate that this node is disallowed."),
verbose_name=_(u"Robots"))
disallow = models.BooleanField(help_text=_(u"Check it to disallow the page in this node in the file robots.txt."),
verbose_name=_(u"Disallow"), default=False)
@property
def page_name(self):
"""
It returns page's name of this node
"""
if self.page:
return self.page.name
return ""
@property
def view_path(self):
"""
It returns page's view_path
"""
if self.page:
return self.page.view_path
return ""
@property
def slug(self):
"""
It returns node's slug
"""
if self.is_root_node():
return ""
if self.slugable and self.parent.parent:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}/{1}".format(self.parent.slug, self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}/{2}'.format(self.parent.slug, self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}/{1}'.format(self.parent.slug, self.name)
elif self.slugable:
if not self.page.regex or (self.page.regex and not self.page.show_regex) or self.is_leaf_node():
return u"{0}".format(self.page.slug)
elif self.page.regex and self.value_regex and self.page.show_regex:
return u'{0}/{1}'.format(self.page.slug, self.value_regex)
elif not self.hide_in_url:
return u'{0}'.format(self.name)
return ""
@property
def slugable(self):
"""
A node is slugable in following cases:
1 - Node doesn't have children.
2 - Node has children but its page doesn't have a regex.
3 - Node has children, its page has regex but it doesn't show it.
4 - Node has children, its page shows his regex and node has a default value for regex.
5 - Node hasn't a page but it ins't hidden in url.
"""
if self.page:
if self.is_leaf_node():
return True
if not self.is_leaf_node() and not self.page.regex:
return True
if not self.is_leaf_node() and self.page.regex and not self.page.show_regex:
return True
if not self.is_leaf_node() and self.page.regex and self.page.show_regex and self.value_regex:
return True
elif not self.is_leaf_node() and not self.hide_in_url:
return True
return False
def get_pattern(self):
"""
It returns its url pattern
"""
if self.is_root_node():
return ""
else:
parent_pattern = self.parent.get_pattern()
if parent_pattern != "":
parent_pattern = u"{}".format(parent_pattern)
if not self.page and not self.is_leaf_node():
if self.hide_in_url:
return u'{0}'.format(parent_pattern)
else:
return u'{0}{1}'.format(parent_pattern, self.name)
else:
if self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}'.format(parent_pattern, self.page.slug, self.page.regex)
elif self.is_leaf_node() and (not self.page.regex or not self.page.show_regex):
return u'{0}{1}/'.format(parent_pattern, self.page.slug)
elif not self.is_leaf_node() and self.page.regex and self.page.show_regex:
return u'{0}{1}/{2}/'.format(parent_pattern, self.page.slug, self.page.regex)
else:
return u'{0}{1}/'.format(parent_pattern, self.page.slug)
def get_absolute_url(self):
"""
It returns simply a link as string
"""
return u"{0}".format(self.slug)
@property
def __unicode__(self):
if self.page_name is None:
page_name = "-"
else:
page_name = self.page_name
return u"{0} ({1})".format(self.name, page_name)
class Meta:
verbose_name = _(u"Node")
verbose_name_plural = _(u"Nodes")
ordering = ['tree_id', 'lft']
|
20tab/twentytab-tree | tree/models.py | Page.view_path | python | def view_path(self):
if self.scheme_name is None or self.scheme_name == "":
return self.view.view_path
else:
return self.scheme_name | It returns view's view path | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L238-L245 | null | class Page(models.Model):
"""
This is the class that defines a page of the structure.
"""
#objects = PageManager()
name = models.CharField(max_length=50, unique=True, help_text=_(u"Identifying page's name."),
verbose_name=_(u"Name"))
slug = models.SlugField(max_length=50, help_text=_(u"Identifying page's url."),
verbose_name=_(u"Slug"))
regex = models.CharField(
max_length=150, null=True, blank=True,
help_text=_(u"Set the regular expression that completes the url (e.g. \"(?P<element_id>\d+)\")."),
verbose_name=_(u"Regex")
)
show_regex = models.BooleanField(_(u'Show regex'), default=True,
help_text=_(u'If it\'s checked the regex will be shown in urlpattern'))
static_vars = models.TextField(
null=True, blank=True,
help_text=_(
u"""Set the dictionary of static parameters of the page in a regular format:
{\"param1\":value1, \"param2\":value2}."""),
verbose_name=_(u"Static vars")
)
scheme_name = models.CharField(max_length=100, null=True, blank=True,
help_text=_(u"Set the unique name to associate the view of a callback url."),
verbose_name=_(u"Scheme name"))
template = models.ForeignKey(u"Template", help_text=_(u"Set the template to associate with the page."),
verbose_name=_(u"Template"))
view = models.ForeignKey(u"View", help_text=_(u"Set the view to associate with the page."),
verbose_name=_(u"View"))
presentation_type = models.CharField(max_length=150, null=True, blank=True,
choices=(("StaticPage", "StaticPage"), ("Custom", "Custom"),),
help_text=_(u"Select the presentation type."),
verbose_name=_(u"Presentation type"))
@property
def get_absolute_url(self):
"""
It returns absolute url defined by node related to this page
"""
try:
node = Node.objects.select_related().filter(page=self)[0]
return node.get_absolute_url()
except Exception, e:
raise ValueError(u"Error in {0}.{1}: {2}".format(self.__module__, self.__class__.__name__, e))
return u""
def check_static_vars(self, node):
"""
This function check if a Page has static vars
"""
if self.static_vars == "" and hasattr(self, "template"):
self.static_vars = {
'upy_context': {
'template_name': u"{}/{}".format(self.template.app_name, self.template.file_name)
}
}
elif hasattr(self, "template"):
self.static_vars = literal_eval(self.static_vars)
self.static_vars['upy_context']['template_name'] = u"{}/{}".format(
self.template.app_name, self.template.file_name
)
self.static_vars['upy_context']['NODE'] = node
self.static_vars['upy_context']['PAGE'] = self
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u"Page")
verbose_name_plural = _(u"Pages")
ordering = ['name']
|
20tab/twentytab-tree | tree/models.py | Page.get_absolute_url | python | def get_absolute_url(self):
try:
node = Node.objects.select_related().filter(page=self)[0]
return node.get_absolute_url()
except Exception, e:
raise ValueError(u"Error in {0}.{1}: {2}".format(self.__module__, self.__class__.__name__, e))
return u"" | It returns absolute url defined by node related to this page | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L247-L256 | null | class Page(models.Model):
"""
This is the class that defines a page of the structure.
"""
#objects = PageManager()
name = models.CharField(max_length=50, unique=True, help_text=_(u"Identifying page's name."),
verbose_name=_(u"Name"))
slug = models.SlugField(max_length=50, help_text=_(u"Identifying page's url."),
verbose_name=_(u"Slug"))
regex = models.CharField(
max_length=150, null=True, blank=True,
help_text=_(u"Set the regular expression that completes the url (e.g. \"(?P<element_id>\d+)\")."),
verbose_name=_(u"Regex")
)
show_regex = models.BooleanField(_(u'Show regex'), default=True,
help_text=_(u'If it\'s checked the regex will be shown in urlpattern'))
static_vars = models.TextField(
null=True, blank=True,
help_text=_(
u"""Set the dictionary of static parameters of the page in a regular format:
{\"param1\":value1, \"param2\":value2}."""),
verbose_name=_(u"Static vars")
)
scheme_name = models.CharField(max_length=100, null=True, blank=True,
help_text=_(u"Set the unique name to associate the view of a callback url."),
verbose_name=_(u"Scheme name"))
template = models.ForeignKey(u"Template", help_text=_(u"Set the template to associate with the page."),
verbose_name=_(u"Template"))
view = models.ForeignKey(u"View", help_text=_(u"Set the view to associate with the page."),
verbose_name=_(u"View"))
presentation_type = models.CharField(max_length=150, null=True, blank=True,
choices=(("StaticPage", "StaticPage"), ("Custom", "Custom"),),
help_text=_(u"Select the presentation type."),
verbose_name=_(u"Presentation type"))
@property
def view_path(self):
"""
It returns view's view path
"""
if self.scheme_name is None or self.scheme_name == "":
return self.view.view_path
else:
return self.scheme_name
def check_static_vars(self, node):
"""
This function check if a Page has static vars
"""
if self.static_vars == "" and hasattr(self, "template"):
self.static_vars = {
'upy_context': {
'template_name': u"{}/{}".format(self.template.app_name, self.template.file_name)
}
}
elif hasattr(self, "template"):
self.static_vars = literal_eval(self.static_vars)
self.static_vars['upy_context']['template_name'] = u"{}/{}".format(
self.template.app_name, self.template.file_name
)
self.static_vars['upy_context']['NODE'] = node
self.static_vars['upy_context']['PAGE'] = self
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u"Page")
verbose_name_plural = _(u"Pages")
ordering = ['name']
|
20tab/twentytab-tree | tree/models.py | Page.check_static_vars | python | def check_static_vars(self, node):
if self.static_vars == "" and hasattr(self, "template"):
self.static_vars = {
'upy_context': {
'template_name': u"{}/{}".format(self.template.app_name, self.template.file_name)
}
}
elif hasattr(self, "template"):
self.static_vars = literal_eval(self.static_vars)
self.static_vars['upy_context']['template_name'] = u"{}/{}".format(
self.template.app_name, self.template.file_name
)
self.static_vars['upy_context']['NODE'] = node
self.static_vars['upy_context']['PAGE'] = self | This function check if a Page has static vars | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L258-L274 | null | class Page(models.Model):
"""
This is the class that defines a page of the structure.
"""
#objects = PageManager()
name = models.CharField(max_length=50, unique=True, help_text=_(u"Identifying page's name."),
verbose_name=_(u"Name"))
slug = models.SlugField(max_length=50, help_text=_(u"Identifying page's url."),
verbose_name=_(u"Slug"))
regex = models.CharField(
max_length=150, null=True, blank=True,
help_text=_(u"Set the regular expression that completes the url (e.g. \"(?P<element_id>\d+)\")."),
verbose_name=_(u"Regex")
)
show_regex = models.BooleanField(_(u'Show regex'), default=True,
help_text=_(u'If it\'s checked the regex will be shown in urlpattern'))
static_vars = models.TextField(
null=True, blank=True,
help_text=_(
u"""Set the dictionary of static parameters of the page in a regular format:
{\"param1\":value1, \"param2\":value2}."""),
verbose_name=_(u"Static vars")
)
scheme_name = models.CharField(max_length=100, null=True, blank=True,
help_text=_(u"Set the unique name to associate the view of a callback url."),
verbose_name=_(u"Scheme name"))
template = models.ForeignKey(u"Template", help_text=_(u"Set the template to associate with the page."),
verbose_name=_(u"Template"))
view = models.ForeignKey(u"View", help_text=_(u"Set the view to associate with the page."),
verbose_name=_(u"View"))
presentation_type = models.CharField(max_length=150, null=True, blank=True,
choices=(("StaticPage", "StaticPage"), ("Custom", "Custom"),),
help_text=_(u"Select the presentation type."),
verbose_name=_(u"Presentation type"))
@property
def view_path(self):
"""
It returns view's view path
"""
if self.scheme_name is None or self.scheme_name == "":
return self.view.view_path
else:
return self.scheme_name
def get_absolute_url(self):
"""
It returns absolute url defined by node related to this page
"""
try:
node = Node.objects.select_related().filter(page=self)[0]
return node.get_absolute_url()
except Exception, e:
raise ValueError(u"Error in {0}.{1}: {2}".format(self.__module__, self.__class__.__name__, e))
return u""
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u"Page")
verbose_name_plural = _(u"Pages")
ordering = ['name']
|
20tab/twentytab-tree | tree/models.py | View.view_path | python | def view_path(self):
return u"{0}.{1}.{2}".format(self.app_name, self.module_name, self.func_name) | It returns view_path as string like: 'app_name.module_mane.func_name' | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/models.py#L373-L377 | null | class View(models.Model):
"""
It defines view object and it's used to write view definition in views.py module
"""
name = models.CharField(max_length=100, help_text=_(u"Set the view's name."), verbose_name=_(u"Name"))
app_name = models.CharField(max_length=100, help_text=_(u"Set the application's name of the view."),
choices=list_apps(),
verbose_name=_(u"App name"))
func_name = models.CharField(max_length=100, help_text=_(u"Set the view's function name."),
verbose_name=_(u"Func name"))
input_vars = models.TextField(null=True, blank=True, help_text=_(u"Set the input variables required by view."),
verbose_name=_(u"Input vars"))
output_vars = models.TextField(null=True, blank=True,
help_text=_(u"Set the json list of output variables required by template."),
verbose_name=_(u"Output vars"))
module_name = models.CharField(max_length=100, default=u"views", help_text=_(u"Set the module's name of the view."),
verbose_name=_(u"Module name"))
@property
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not os.path.exists(u'{0}/'.format(self.app_name)):
os.makedirs(u'{0}/'.format(self.app_name))
file_view_name = u'{0}/{1}.py'.format(self.app_name, self.module_name)
found = False
if os.path.exists(file_view_name):
file_view = open(file_view_name, "r")
for l in file_view.readlines():
if l[:3] == "def":
cont = len(self.func_name)
if l[4:cont + 4] == self.func_name:
found = True
file_view.close()
if not found:
file_view = open(file_view_name, "a")
upy_context_string = ", upy_context"
str_to_write = u"\n\ndef %s(request%s" % (self.func_name, upy_context_string)
if self.input_vars != "" and self.input_vars:
if self.input_vars[0:1] == ",":
self.input_vars = self.input_vars[1:]
if self.input_vars[-1] == ",":
self.input_vars = self.input_vars[:-1]
str_to_write += ", %s" % self.input_vars
str_to_write += "):\n"
if self.output_vars != "" and self.output_vars:
outputvars = self.output_vars[1:-1]
for item in outputvars.split(','):
obj_tuple = item.split(':')
str_to_write += " {0} = \"{1} to initialize\"\n".format(obj_tuple[1], obj_tuple[1])
str_to_write += " return main_render(request{0}".format(upy_context_string)
if self.output_vars != "" and self.output_vars:
str_to_write += ", {0}".format(self.output_vars)
else:
str_to_write += ", {}"
str_to_write += ")\n"
file_view.write(str_to_write)
file_view.close()
super(View, self).save(*args, **kwargs)
class Meta:
verbose_name = _(u"View")
verbose_name_plural = _(u"Views")
ordering = ['name']
|
20tab/twentytab-tree | tree/views.py | tree_render | python | def tree_render(request, upy_context, vars_dictionary):
page = upy_context['PAGE']
return render_to_response(page.template.file_name, vars_dictionary, context_instance=RequestContext(request)) | It renders template defined in upy_context's page passed in arguments | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/views.py#L8-L13 | null | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from tree.utility import RobotTXT, Sitemap
from django.conf import settings
def view_404(request, url=None):
"""
It returns a 404 http response
"""
res = render_to_response("404.html", {"PAGE_URL": request.get_full_path()},
context_instance=RequestContext(request))
res.status_code = 404
return res
def view_500(request, url=None):
"""
it returns a 500 http response
"""
res = render_to_response("500.html", context_instance=RequestContext(request))
res.status_code = 500
return res
def sitemap(request):
"""
It returns sitemap.xml as http response
"""
upysitemap = Sitemap(request)
return HttpResponse(upysitemap._do_sitemap(), content_type="text/xml")
def robots(request):
"""
It returns robots.txt as http response
"""
upyrobottxt = RobotTXT(request)
return HttpResponse(upyrobottxt._do_robotstxt(), content_type="text")
def favicon(request):
"""
It returns favicon's location
"""
favicon = u"{}tree/images/favicon.ico".format(settings.STATIC_URL)
try:
from seo.models import MetaSite
site = MetaSite.objects.get(default=True)
return HttpResponseRedirect(site.favicon.url)
except:
return HttpResponseRedirect(favicon)
|
20tab/twentytab-tree | tree/views.py | view_404 | python | def view_404(request, url=None):
res = render_to_response("404.html", {"PAGE_URL": request.get_full_path()},
context_instance=RequestContext(request))
res.status_code = 404
return res | It returns a 404 http response | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/views.py#L16-L23 | null | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from tree.utility import RobotTXT, Sitemap
from django.conf import settings
def tree_render(request, upy_context, vars_dictionary):
"""
It renders template defined in upy_context's page passed in arguments
"""
page = upy_context['PAGE']
return render_to_response(page.template.file_name, vars_dictionary, context_instance=RequestContext(request))
def view_500(request, url=None):
"""
it returns a 500 http response
"""
res = render_to_response("500.html", context_instance=RequestContext(request))
res.status_code = 500
return res
def sitemap(request):
"""
It returns sitemap.xml as http response
"""
upysitemap = Sitemap(request)
return HttpResponse(upysitemap._do_sitemap(), content_type="text/xml")
def robots(request):
"""
It returns robots.txt as http response
"""
upyrobottxt = RobotTXT(request)
return HttpResponse(upyrobottxt._do_robotstxt(), content_type="text")
def favicon(request):
"""
It returns favicon's location
"""
favicon = u"{}tree/images/favicon.ico".format(settings.STATIC_URL)
try:
from seo.models import MetaSite
site = MetaSite.objects.get(default=True)
return HttpResponseRedirect(site.favicon.url)
except:
return HttpResponseRedirect(favicon)
|
20tab/twentytab-tree | tree/views.py | view_500 | python | def view_500(request, url=None):
res = render_to_response("500.html", context_instance=RequestContext(request))
res.status_code = 500
return res | it returns a 500 http response | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/views.py#L26-L32 | null | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from tree.utility import RobotTXT, Sitemap
from django.conf import settings
def tree_render(request, upy_context, vars_dictionary):
"""
It renders template defined in upy_context's page passed in arguments
"""
page = upy_context['PAGE']
return render_to_response(page.template.file_name, vars_dictionary, context_instance=RequestContext(request))
def view_404(request, url=None):
"""
It returns a 404 http response
"""
res = render_to_response("404.html", {"PAGE_URL": request.get_full_path()},
context_instance=RequestContext(request))
res.status_code = 404
return res
def sitemap(request):
"""
It returns sitemap.xml as http response
"""
upysitemap = Sitemap(request)
return HttpResponse(upysitemap._do_sitemap(), content_type="text/xml")
def robots(request):
"""
It returns robots.txt as http response
"""
upyrobottxt = RobotTXT(request)
return HttpResponse(upyrobottxt._do_robotstxt(), content_type="text")
def favicon(request):
"""
It returns favicon's location
"""
favicon = u"{}tree/images/favicon.ico".format(settings.STATIC_URL)
try:
from seo.models import MetaSite
site = MetaSite.objects.get(default=True)
return HttpResponseRedirect(site.favicon.url)
except:
return HttpResponseRedirect(favicon)
|
20tab/twentytab-tree | tree/views.py | favicon | python | def favicon(request):
favicon = u"{}tree/images/favicon.ico".format(settings.STATIC_URL)
try:
from seo.models import MetaSite
site = MetaSite.objects.get(default=True)
return HttpResponseRedirect(site.favicon.url)
except:
return HttpResponseRedirect(favicon) | It returns favicon's location | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/views.py#L51-L61 | null | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from tree.utility import RobotTXT, Sitemap
from django.conf import settings
def tree_render(request, upy_context, vars_dictionary):
"""
It renders template defined in upy_context's page passed in arguments
"""
page = upy_context['PAGE']
return render_to_response(page.template.file_name, vars_dictionary, context_instance=RequestContext(request))
def view_404(request, url=None):
"""
It returns a 404 http response
"""
res = render_to_response("404.html", {"PAGE_URL": request.get_full_path()},
context_instance=RequestContext(request))
res.status_code = 404
return res
def view_500(request, url=None):
"""
it returns a 500 http response
"""
res = render_to_response("500.html", context_instance=RequestContext(request))
res.status_code = 500
return res
def sitemap(request):
"""
It returns sitemap.xml as http response
"""
upysitemap = Sitemap(request)
return HttpResponse(upysitemap._do_sitemap(), content_type="text/xml")
def robots(request):
"""
It returns robots.txt as http response
"""
upyrobottxt = RobotTXT(request)
return HttpResponse(upyrobottxt._do_robotstxt(), content_type="text")
|
20tab/twentytab-tree | tree/template_context/context_processors.py | set_meta | python | def set_meta(request):
context_extras = {}
if not request.is_ajax() and hasattr(request, 'upy_context') and request.upy_context['PAGE']:
context_extras['PAGE'] = request.upy_context['PAGE']
context_extras['NODE'] = request.upy_context['NODE']
return context_extras | This context processor returns meta informations contained in cached files.
If there aren't cache it calculates dictionary to return | train | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/template_context/context_processors.py#L1-L10 | null | |
inspirehep/plotextractor | plotextractor/extractor.py | get_context | python | def get_context(lines, backwards=False):
tex_tag = re.compile(r".*\\(\w+).*")
sentence = re.compile(r"(?<=[.?!])[\s]+(?=[A-Z])")
context = []
word_list = lines.split()
if backwards:
word_list.reverse()
# For each word we do the following:
# 1. Check if we have reached word limit
# 2. If not, see if this is a TeX tag and see if its 'illegal'
# 3. Otherwise, add word to context
for word in word_list:
if len(context) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:
break
match = tex_tag.match(word)
if match and match.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX:
# TeX Construct matched, return
if backwards:
# When reversed we need to go back and
# remove unwanted data within brackets
temp_word = ""
while len(context):
temp_word = context.pop()
if '}' in temp_word:
break
break
context.append(word)
if backwards:
context.reverse()
text = " ".join(context)
sentence_list = sentence.split(text)
if backwards:
sentence_list.reverse()
if len(sentence_list) > CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT:
return " ".join(
sentence_list[:CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT])
else:
return " ".join(sentence_list) | Get context.
Given a relevant string from a TeX file, this function will extract text
from it as far as it is deemed contextually relevant, either backwards or
forwards in the text.
The level of relevance allowed is configurable. When it reaches some
point in the text that is determined to be out of scope from the current
context, like text that is identified as a new paragraph, a complex TeX
structure ('/begin', '/end', etc.) etc., it will return the previously
allocated text.
For use when extracting text with contextual value for an figure or plot.
:param lines (string): string to examine
:param reversed (bool): are we searching backwards?
:return context (string): extracted context | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/extractor.py#L55-L115 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Plot extractor extractor."""
from __future__ import absolute_import, print_function
import codecs
import os
import re
from .config import (
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT,
CFG_PLOTEXTRACTOR_DISALLOWED_TEX,
)
from .output_utils import (
assemble_caption,
find_open_and_close_braces,
get_tex_location,
)
from .converter import rotate_image
ARXIV_HEADER = 'arXiv:'
PLOTS_DIR = 'plots'
MAIN_CAPTION_OR_IMAGE = 0
SUB_CAPTION_OR_IMAGE = 1
def extract_context(tex_file, extracted_image_data):
    """Extract surrounding text for every reference to each image's label.

    For each entry in *extracted_image_data*, every ``\\fig{label}`` or
    ``\\ref{label}`` occurrence in the TeX source is located and a window
    of text (bounded by CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT characters
    on each side, then trimmed by get_context) is collected around it.

    NOTE(review): this function returns ``None`` on success — it mutates
    *extracted_image_data* in place, storing the snippets under each
    entry's ``'contexts'`` key. It returns ``[]`` only when *tex_file* is
    missing or is a directory.

    :param tex_file (string): path to the .tex file to scan
    :param extracted_image_data (list of dict): entries with at least a
        ``'label'`` key; each entry receives a ``'contexts'`` list.
    """
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        return []
    lines = "".join(get_lines_from_file(tex_file))
    # Generate context for each image and its associated labels
    for data in extracted_image_data:
        context_list = []
        # Spans (start, end) of every \fig{label} / \ref{label} occurrence
        indicies = [match.span()
                    for match in re.finditer(r"(\\(?:fig|ref)\{%s\})" %
                                             (re.escape(data['label']),),
                                             lines)]
        for startindex, endindex in indicies:
            # Text before the reference, clamped at the start of the file
            i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            if i < 0:
                text_before = lines[:startindex]
            else:
                text_before = lines[i:startindex]
            context_before = get_context(text_before, backwards=True)
            # Text after the reference, clamped at the end of the file
            i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            text_after = lines[endindex:i]
            context_after = get_context(text_after)
            context_list.append(
                context_before + ' \\ref{' + data['label'] + '} ' +
                context_after
            )
        data['contexts'] = context_list
def extract_captions(tex_file, sdir, image_list, primary=True):
"""Extract captions.
Take the TeX file and the list of images in the tarball (which all,
presumably, are used in the TeX file) and figure out which captions
in the text are associated with which images
:param: lines (list): list of lines of the TeX file
:param: tex_file (string): the name of the TeX file which mentions
the images
:param: sdir (string): path to current sub-directory
:param: image_list (list): list of images in tarball
:param: primary (bool): is this the primary call to extract_caption?
:return: images_and_captions_and_labels ([(string, string, list),
(string, string, list), ...]):
a list of tuples representing the names of images and their
corresponding figure labels from the TeX file
"""
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
lines = get_lines_from_file(tex_file)
# possible figure lead-ins
figure_head = u'\\begin{figure' # also matches figure*
figure_wrap_head = u'\\begin{wrapfigure'
figure_tail = u'\\end{figure' # also matches figure*
figure_wrap_tail = u'\\end{wrapfigure'
picture_head = u'\\begin{picture}'
displaymath_head = u'\\begin{displaymath}'
subfloat_head = u'\\subfloat'
subfig_head = u'\\subfigure'
includegraphics_head = u'\\includegraphics'
epsfig_head = u'\\epsfig'
input_head = u'\\input'
# possible caption lead-ins
caption_head = u'\\caption'
figcaption_head = u'\\figcaption'
label_head = u'\\label'
rotate = u'rotate='
angle = u'angle='
eps_tail = u'.eps'
ps_tail = u'.ps'
doc_head = u'\\begin{document}'
doc_tail = u'\\end{document}'
extracted_image_data = []
cur_image = ''
caption = ''
labels = []
active_label = ""
# cut out shit before the doc head
if primary:
for line_index in range(len(lines)):
if lines[line_index].find(doc_head) < 0:
lines[line_index] = ''
else:
break
# are we using commas in filenames here?
commas_okay = False
for dummy1, dummy2, filenames in \
os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
for filename in filenames:
if filename.find(',') > -1:
commas_okay = True
break
# a comment is a % not preceded by a \
comment = re.compile("(?<!\\\\)%")
for line_index in range(len(lines)):
# get rid of pesky comments by splitting where the comment is
# and keeping only the part before the %
line = comment.split(lines[line_index])[0]
line = line.strip()
lines[line_index] = line
in_figure_tag = 0
for line_index in range(len(lines)):
line = lines[line_index]
if line == '':
continue
if line.find(doc_tail) > -1:
break
"""
FIGURE -
structure of a figure:
\begin{figure}
\formatting...
\includegraphics[someoptions]{FILENAME}
\caption{CAPTION} %caption and includegraphics may be switched!
\end{figure}
"""
index = max([line.find(figure_head), line.find(figure_wrap_head)])
if index > -1:
in_figure_tag = 1
# some punks don't like to put things in the figure tag. so we
# just want to see if there is anything that is sitting outside
# of it when we find it
cur_image, caption, extracted_image_data = put_it_together(
cur_image, caption,
active_label, extracted_image_data,
line_index, lines)
# here, you jerks, just make it so that it's fecking impossible to
# figure out your damn inclusion types
index = max([line.find(eps_tail), line.find(ps_tail),
line.find(epsfig_head)])
if index > -1:
if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
ext = True
else:
ext = False
filenames = intelligently_find_filenames(line, ext=ext,
commas_okay=commas_okay)
# try to look ahead! sometimes there are better matches after
if line_index < len(lines) - 1:
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index < len(lines) - 2:
filenames.extend(intelligently_find_filenames(
lines[line_index + 2],
commas_okay=commas_okay))
for filename in filenames:
filename = filename.encode('utf-8', 'ignore')
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
Rotate and angle
"""
index = max(line.find(rotate), line.find(angle))
if index > -1:
# which is the image associated to it?
filenames = intelligently_find_filenames(line,
commas_okay=commas_okay)
# try the line after and the line before
if line_index + 1 < len(lines):
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index > 1:
filenames.extend(intelligently_find_filenames(
lines[line_index - 1],
commas_okay=commas_okay))
already_tried = []
for filename in filenames:
if filename != 'ERROR' and filename not in already_tried:
if rotate_image(filename, line, sdir, image_list):
break
already_tried.append(filename)
"""
INCLUDEGRAPHICS -
structure of includegraphics:
\includegraphics[someoptions]{FILENAME}
"""
index = line.find(includegraphics_head)
if index > -1:
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index, index, '{', lines)
filename = lines[open_curly_line][open_curly + 1:close_curly]
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
{\input{FILENAME}}
\caption{CAPTION}
This input is ambiguous, since input is also used for things like
inclusion of data from other LaTeX files directly.
"""
index = line.find(input_head)
if index > -1:
new_tex_names = intelligently_find_filenames(
line, TeX=True,
commas_okay=commas_okay)
for new_tex_name in new_tex_names:
if new_tex_name != 'ERROR':
new_tex_file = get_tex_location(new_tex_name, tex_file)
if new_tex_file and primary: # to kill recursion
extracted_image_data.extend(extract_captions(
new_tex_file, sdir,
image_list,
primary=False
))
"""PICTURE"""
index = line.find(picture_head)
if index > -1:
# structure of a picture:
# \begin{picture}
# ....not worrying about this now
# print('found picture tag')
# FIXME
pass
"""DISPLAYMATH"""
index = line.find(displaymath_head)
if index > -1:
# structure of a displaymath:
# \begin{displaymath}
# ....not worrying about this now
# print('found displaymath tag')
# FIXME
pass
"""
CAPTIONS -
structure of a caption:
\caption[someoptions]{CAPTION}
or
\caption{CAPTION}
or
\caption{{options}{CAPTION}}
"""
index = max([line.find(caption_head), line.find(figcaption_head)])
if index > -1:
open_curly, open_curly_line, close_curly, close_curly_line = \
find_open_and_close_braces(line_index, index, '{', lines)
cap_begin = open_curly + 1
cur_caption = assemble_caption(
open_curly_line, cap_begin,
close_curly_line, close_curly, lines)
if caption == '':
caption = cur_caption
elif type(caption) == list:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
else:
caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
elif caption != cur_caption:
caption = ['', [caption, cur_caption]]
"""
SUBFLOATS -
structure of a subfloat (inside of a figure tag):
\subfloat[CAPTION]{options{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfloat_head)
if index > -1:
# if we are dealing with subfloats, we need a different
# sort of structure to keep track of captions and subcaptions
if not isinstance(cur_image, list):
cur_image = [cur_image, []]
if not isinstance(caption, list):
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(
open_square_line,
cap_begin, close_square_line, close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(close_square_line,
close_square, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
SUBFIGURES -
structure of a subfigure (inside a figure tag):
\subfigure[CAPTION]{
\includegraphics[options]{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfig_head)
if index > -1:
# like with subfloats, we need a different structure for keepin
# track of this stuff
if type(cur_image) != list:
cur_image = [cur_image, []]
if type(caption) != list:
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(open_square_line,
cap_begin, close_square_line,
close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
index_cpy = index
# find the graphics tag to get the filename
# it is okay if we eat lines here
index = line.find(includegraphics_head)
while index == -1 and (line_index + 1) < len(lines):
line_index += 1
line = lines[line_index]
index = line.find(includegraphics_head)
if line_index == len(lines):
# didn't find the image name on line
line_index = index_cpy
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index,
index, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
LABELS -
structure of a label:
\label{somelabelnamewhichprobablyincludesacolon}
Labels are used to tag images and will later be used in ref tags
to reference them. This is interesting because in effect the refs
to a plot are additional caption for it.
Notes: labels can be used for many more things than just plots.
We'll have to experiment with how to best associate a label with an
image.. if it's in the caption, it's easy. If it's in a figure, it's
still okay... but the images that aren't in figure tags are numerous.
"""
index = line.find(label_head)
if index > -1 and in_figure_tag:
open_curly, open_curly_line, close_curly, dummy =\
find_open_and_close_braces(line_index,
index, '{', lines)
label = lines[open_curly_line][open_curly + 1:close_curly]
if label not in labels:
active_label = label
labels.append(label)
"""
FIGURE
important: we put the check for the end of the figure at the end
of the loop in case some pathological person puts everything in one
line
"""
index = max([
line.find(figure_tail),
line.find(figure_wrap_tail),
line.find(doc_tail)
])
if index > -1:
in_figure_tag = 0
cur_image, caption, extracted_image_data = \
put_it_together(cur_image, caption, active_label,
extracted_image_data,
line_index, lines)
"""
END DOCUMENT
we shouldn't look at anything after the end document tag is found
"""
index = line.find(doc_tail)
if index > -1:
break
return extracted_image_data
def put_it_together(cur_image, caption, context, extracted_image_data,
                    line_index, lines):
    """Put it together.

    Takes the current image(s) and caption(s) and assembles them into
    something useful in the extracted_image_data list.

    Fixes over the original:
      * a typo (``.`` instead of ``,``) in the single-image/multi-caption
        branch raised AttributeError instead of appending the tuple;
      * the 'ERROR' cleanup no longer removes items from the list while
        iterating it (which skipped consecutive 'ERROR' entries).

    :param: cur_image (string || list): the image currently being dealt
        with, or the list of images, in the case of subimages
    :param: caption (string || list): the caption or captions currently in
        scope
    :param: context: the extracted textual context attached to every
        (image, caption) tuple appended here
    :param: extracted_image_data ([(string, string, context), ...]):
        a list of tuples of images matched to captions from this document;
        mutated in place and also returned
    :param: line_index (int): the index where we are in the lines (for
        searchback and searchforward purposes)
    :param: lines ([string, string, ...]): the lines in the TeX
    :return: (cur_image, caption, extracted_image_data): reset image and
        caption (always ``''``) plus the updated list
    """
    # NOTE: type(x) == list is kept (rather than isinstance) to preserve
    # the original's exact semantics for possible list subclasses.
    if type(cur_image) == list:
        if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':
            cur_image[MAIN_CAPTION_OR_IMAGE] = ''
        if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
            # Rebuild instead of removing while iterating.
            cur_image[SUB_CAPTION_OR_IMAGE] = [
                image for image in cur_image[SUB_CAPTION_OR_IMAGE]
                if image != 'ERROR']
    if cur_image != '' and caption != '':
        if type(cur_image) == list and type(caption) == list:
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\
                    caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE],
                     caption[MAIN_CAPTION_OR_IMAGE],
                     context))
            if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list:
                # it's a good idea to attach the main caption to other
                # things, but the main image can only be used once
                cur_image[MAIN_CAPTION_OR_IMAGE] = ''
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    for index in \
                            range(len(cur_image[SUB_CAPTION_OR_IMAGE])):
                        if index < len(caption[SUB_CAPTION_OR_IMAGE]):
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                caption[SUB_CAPTION_OR_IMAGE][index]
                        else:
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                'Caption not extracted'
                        extracted_image_data.append(
                            (cur_image[SUB_CAPTION_OR_IMAGE][index],
                             long_caption, context))
                else:
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \
                        ' : ' + caption[SUB_CAPTION_OR_IMAGE]
                    for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_image, long_caption, context))
            else:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                    for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:
                        long_caption = long_caption + ' : ' + sub_cap
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE], long_caption,
                         context))
                else:
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE],
                         caption[SUB_CAPTION_OR_IMAGE], context))
        elif type(cur_image) == list:
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                for image in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append((image, caption, context))
            else:
                extracted_image_data.append(
                    (cur_image[SUB_CAPTION_OR_IMAGE], caption, context))
        elif type(caption) == list:
            if caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))
            if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                # multiple caps for one image:
                long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                    if long_caption != '':
                        long_caption += ' : '
                    long_caption += subcap
                extracted_image_data.append((cur_image, long_caption, context))
            else:
                # BUGFIX: was `caption[SUB_CAPTION_OR_IMAGE]. context`
                # (attribute access) which raised AttributeError.
                extracted_image_data.append(
                    (cur_image, caption[SUB_CAPTION_OR_IMAGE], context))
        else:
            extracted_image_data.append((cur_image, caption, context))
    elif cur_image != '' and caption == '':
        # we may have missed the caption somewhere.
        REASONABLE_SEARCHBACK = 25
        REASONABLE_SEARCHFORWARD = 5
        curly_no_tag_preceding = '(?<!\\w){'
        for searchback in range(REASONABLE_SEARCHBACK):
            if line_index - searchback < 0:
                continue
            back_line = lines[line_index - searchback]
            m = re.search(curly_no_tag_preceding, back_line)
            if m:
                open_curly = m.start()
                open_curly, open_curly_line, close_curly, \
                    close_curly_line = find_open_and_close_braces(
                        line_index - searchback, open_curly, '{', lines)
                cap_begin = open_curly + 1
                caption = assemble_caption(open_curly_line, cap_begin,
                                           close_curly_line, close_curly,
                                           lines)
                if type(cur_image) == list:
                    extracted_image_data.append(
                        (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
                    for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_img, caption, context))
                else:
                    extracted_image_data.append((cur_image, caption, context))
                break
        if caption == '':
            for searchforward in range(REASONABLE_SEARCHFORWARD):
                if line_index + searchforward >= len(lines):
                    break
                fwd_line = lines[line_index + searchforward]
                m = re.search(curly_no_tag_preceding, fwd_line)
                if m:
                    open_curly = m.start()
                    open_curly, open_curly_line, close_curly,\
                        close_curly_line = find_open_and_close_braces(
                            line_index + searchforward, open_curly,
                            '{', lines)
                    cap_begin = open_curly + 1
                    caption = assemble_caption(open_curly_line,
                                               cap_begin, close_curly_line,
                                               close_curly, lines)
                    if type(cur_image) == list:
                        extracted_image_data.append(
                            (cur_image[MAIN_CAPTION_OR_IMAGE],
                             caption, context))
                        for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                            extracted_image_data.append(
                                (sub_img, caption, context))
                    else:
                        extracted_image_data.append(
                            (cur_image, caption, context))
                    break
        if caption == '':
            if type(cur_image) == list:
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found',
                     context))
                for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append(
                        (sub_img, 'No caption', context))
            else:
                extracted_image_data.append(
                    (cur_image, 'No caption found', context))
    elif caption != '' and cur_image == '':
        if type(caption) == list:
            long_caption = caption[MAIN_CAPTION_OR_IMAGE]
            for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                long_caption = long_caption + ': ' + subcap
        else:
            long_caption = caption
        extracted_image_data.append(('', 'noimg' + long_caption, context))
    # if we're leaving the figure, no sense keeping the data
    cur_image = ''
    caption = ''
    return cur_image, caption, extracted_image_data
def intelligently_find_filenames(line, TeX=False, ext=False,
                                 commas_okay=False):
    """Find probable graphics/TeX filenames mentioned in *line*.

    Several regular expressions are tried in turn, matching the common
    ways a filename appears in LaTeX source (``=FILENAME``, ``file=`` /
    ``figure=`` options, bracketed/quoted names, or a bare name on a line
    of its own).

    Fixes over the original:
      * ``len(part_before_equals) != file_included`` compared an int to a
        string (always True); lengths are now compared;
      * the final cleanup no longer removes/appends while iterating the
        same list, and splits comma-joined candidates on ``,`` (the
        original split on a space even when it had found a comma).

    :param line (string): the line we want to get a filename out of
    :param TeX (bool): also accept .tex/.latex-style suffixes
    :param ext (bool): require a .eps/.ps-style extension
    :param commas_okay (bool): allow commas inside filename characters
    :return ([string, ...]): candidate filename(s); ``['ERROR']`` when
        nothing plausible was found
    """
    files_included = ['ERROR']
    if commas_okay:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.,%#]+'
    else:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.%#]+'
    if ext:
        valid_for_filename += '\\.e*ps[texfi2]*'
    if TeX:
        valid_for_filename += '[\\.latex]*'
    file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)
    if len(file_inclusion) > 0:
        # right now it looks like '=FILENAME,' or '=FILENAME '
        for file_included in file_inclusion:
            files_included.append(file_included[1:-1])
    file_inclusion = re.findall('(?:[ps]*file=|figure=)' +
                                valid_for_filename + '[,\\]} ]*', line)
    if len(file_inclusion) > 0:
        # still has the =
        for file_included in file_inclusion:
            part_before_equals = file_included.split('=')[0]
            # BUGFIX: compare lengths; the original compared an int with a
            # string, which was unconditionally True.
            if len(part_before_equals) != len(file_included):
                file_included = file_included[
                    len(part_before_equals) + 1:].strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall(
        '["\'{\\[]' + valid_for_filename + '[}\\],"\']',
        line)
    if len(file_inclusion) > 0:
        # right now it's got the {} or [] or "" or '' around it still
        for file_included in file_inclusion:
            file_included = file_included[1:-1]
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall('^' + valid_for_filename + '$', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall('^' + valid_for_filename + '[,\\} $]', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall('\\s*' + valid_for_filename + '\\s*$', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    if files_included != ['ERROR']:
        files_included = files_included[1:]  # cut off the dummy
        cleaned = []
        for file_included in files_included:
            # Drop empty candidates (the original removed them while
            # iterating, silently skipping the following entry).
            if file_included == '':
                continue
            if file_included not in cleaned:
                cleaned.append(file_included)
            # Offer space- and comma-separated sub-parts as extra
            # candidates too.
            for separator in (' ', ','):
                if separator in file_included:
                    for subfile in file_included.split(separator):
                        if subfile and subfile not in cleaned:
                            cleaned.append(subfile)
        files_included = cleaned
    return files_included
def get_lines_from_file(filepath, encoding="UTF-8"):
    """Return the list of lines in the file at *filepath*.

    The file is first read as *encoding* (UTF-8 by default); if decoding
    fails, it is re-read as ISO-8859-1, which accepts any byte sequence.

    Fixes over the original: ``fd`` was unbound in ``finally`` when the
    first ``open`` itself raised, and the first handle was leaked when the
    decode fallback triggered; ``with`` blocks close each handle reliably.

    :param filepath (string): path of the file to read
    :param encoding (string): encoding tried first
    :return lines (list): the decoded lines, line endings preserved
    """
    try:
        with codecs.open(filepath, 'r', encoding) as fd:
            return fd.readlines()
    except UnicodeDecodeError:
        # Fall back to 'ISO-8859-1'
        with codecs.open(filepath, 'r', 'ISO-8859-1') as fd:
            return fd.readlines()
|
inspirehep/plotextractor | plotextractor/extractor.py | extract_context | python | def extract_context(tex_file, extracted_image_data):
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
lines = "".join(get_lines_from_file(tex_file))
# Generate context for each image and its assoc. labels
for data in extracted_image_data:
context_list = []
# Generate a list of index tuples for all matches
indicies = [match.span()
for match in re.finditer(r"(\\(?:fig|ref)\{%s\})" %
(re.escape(data['label']),),
lines)]
for startindex, endindex in indicies:
# Retrive all lines before label until beginning of file
i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
if i < 0:
text_before = lines[:startindex]
else:
text_before = lines[i:startindex]
context_before = get_context(text_before, backwards=True)
# Retrive all lines from label until end of file and get context
i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
text_after = lines[endindex:i]
context_after = get_context(text_after)
context_list.append(
context_before + ' \\ref{' + data['label'] + '} ' +
context_after
)
data['contexts'] = context_list | Extract context.
Given a .tex file and a label name, this function will extract the text
before and after for all the references made to this label in the text.
The number of characters to extract before and after is configurable.
:param tex_file (list): path to .tex file
:param extracted_image_data ([(string, string, list), ...]):
a list of tuples of images matched to labels and captions from
this document.
:return extracted_image_data ([(string, string, list, list),
(string, string, list, list),...)]: the same list, but now containing
extracted contexts | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/extractor.py#L118-L165 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Plot extractor extractor."""
from __future__ import absolute_import, print_function
import codecs
import os
import re
from .config import (
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT,
CFG_PLOTEXTRACTOR_DISALLOWED_TEX,
)
from .output_utils import (
assemble_caption,
find_open_and_close_braces,
get_tex_location,
)
from .converter import rotate_image
ARXIV_HEADER = 'arXiv:'
PLOTS_DIR = 'plots'
MAIN_CAPTION_OR_IMAGE = 0
SUB_CAPTION_OR_IMAGE = 1
def get_context(lines, backwards=False):
"""Get context.
Given a relevant string from a TeX file, this function will extract text
from it as far as it is deemed contextually relevant, either backwards or
forwards in the text.
The level of relevance allowed is configurable. When it reaches some
point in the text that is determined to be out of scope from the current
context, like text that is identified as a new paragraph, a complex TeX
structure ('/begin', '/end', etc.) etc., it will return the previously
allocated text.
For use when extracting text with contextual value for a figure or plot.
:param lines (string): string to examine
:param backwards (bool): are we searching backwards?
:return context (string): extracted context
"""
tex_tag = re.compile(r".*\\(\w+).*")
sentence = re.compile(r"(?<=[.?!])[\s]+(?=[A-Z])")
context = []
word_list = lines.split()
if backwards:
word_list.reverse()
# For each word we do the following:
# 1. Check if we have reached word limit
# 2. If not, see if this is a TeX tag and see if its 'illegal'
# 3. Otherwise, add word to context
for word in word_list:
if len(context) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:
break
match = tex_tag.match(word)
if match and match.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX:
# TeX Construct matched, return
if backwards:
# When reversed we need to go back and
# remove unwanted data within brackets
temp_word = ""
while len(context):
temp_word = context.pop()
if '}' in temp_word:
break
break
context.append(word)
if backwards:
context.reverse()
text = " ".join(context)
sentence_list = sentence.split(text)
if backwards:
sentence_list.reverse()
if len(sentence_list) > CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT:
return " ".join(
sentence_list[:CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT])
else:
return " ".join(sentence_list)
def extract_captions(tex_file, sdir, image_list, primary=True):
    """Extract captions.

    Take the TeX file and the list of images in the tarball (which all,
    presumably, are used in the TeX file) and figure out which captions
    in the text are associated with which images.

    :param: tex_file (string): the name of the TeX file which mentions
        the images

    :param: sdir (string): path to current sub-directory

    :param: image_list (list): list of images in tarball

    :param: primary (bool): is this the primary call to extract_caption?
        Recursive calls on \\input'ed files pass False to stop recursion.

    :return: images_and_captions_and_labels ([(string, string, list),
        (string, string, list), ...]):
        a list of tuples representing the names of images and their
        corresponding figure labels from the TeX file
    """
    # Nothing to do for directories or missing files.
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        return []
    lines = get_lines_from_file(tex_file)

    # possible figure lead-ins
    figure_head = u'\\begin{figure'  # also matches figure*
    figure_wrap_head = u'\\begin{wrapfigure'
    figure_tail = u'\\end{figure'  # also matches figure*
    figure_wrap_tail = u'\\end{wrapfigure'
    picture_head = u'\\begin{picture}'
    displaymath_head = u'\\begin{displaymath}'
    subfloat_head = u'\\subfloat'
    subfig_head = u'\\subfigure'
    includegraphics_head = u'\\includegraphics'
    epsfig_head = u'\\epsfig'
    input_head = u'\\input'
    # possible caption lead-ins
    caption_head = u'\\caption'
    figcaption_head = u'\\figcaption'
    label_head = u'\\label'
    # graphics options that trigger a rotation of the image file
    rotate = u'rotate='
    angle = u'angle='
    # PostScript extensions handled by the direct filename detection below
    eps_tail = u'.eps'
    ps_tail = u'.ps'
    doc_head = u'\\begin{document}'
    doc_tail = u'\\end{document}'

    extracted_image_data = []
    cur_image = ''     # image (or [main, [subs]]) awaiting a caption
    caption = ''       # caption (or [main, [subs]]) awaiting an image
    labels = []
    active_label = ""  # most recent \label seen inside a figure

    # blank out everything before the document head
    if primary:
        for line_index in range(len(lines)):
            if lines[line_index].find(doc_head) < 0:
                lines[line_index] = ''
            else:
                break

    # are we using commas in filenames here?
    commas_okay = False
    for dummy1, dummy2, filenames in \
            os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
        # NOTE(review): the break below only leaves the inner loop, so the
        # whole tree is still walked even after a comma has been found
        for filename in filenames:
            if filename.find(',') > -1:
                commas_okay = True
                break

    # a comment is a % not preceded by a \
    comment = re.compile("(?<!\\\\)%")
    for line_index in range(len(lines)):
        # get rid of pesky comments by splitting where the comment is
        # and keeping only the part before the %
        line = comment.split(lines[line_index])[0]
        line = line.strip()
        lines[line_index] = line

    in_figure_tag = 0
    for line_index in range(len(lines)):
        line = lines[line_index]
        if line == '':
            continue
        if line.find(doc_tail) > -1:
            break

        """
        FIGURE -
        structure of a figure:
        \begin{figure}
        \formatting...
        \includegraphics[someoptions]{FILENAME}
        \caption{CAPTION} %caption and includegraphics may be switched!
        \end{figure}
        """
        index = max([line.find(figure_head), line.find(figure_wrap_head)])
        if index > -1:
            in_figure_tag = 1
            # some authors leave images/captions outside the figure tag, so
            # flush whatever was collected before this figure starts
            cur_image, caption, extracted_image_data = put_it_together(
                cur_image, caption,
                active_label, extracted_image_data,
                line_index, lines)

        # .eps/.ps filenames may appear with any inclusion syntax, so look
        # for the extensions (and \epsfig) directly
        index = max([line.find(eps_tail), line.find(ps_tail),
                     line.find(epsfig_head)])
        if index > -1:
            if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
                ext = True
            else:
                ext = False
            filenames = intelligently_find_filenames(line, ext=ext,
                                                     commas_okay=commas_okay)
            # try to look ahead! sometimes there are better matches after
            if line_index < len(lines) - 1:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 1],
                    commas_okay=commas_okay))
            if line_index < len(lines) - 2:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 2],
                    commas_okay=commas_okay))

            for filename in filenames:
                # NOTE(review): under Python 3 this rebinds filename to a
                # bytes object -- confirm downstream comparisons expect that
                filename = filename.encode('utf-8', 'ignore')
                if cur_image == '':
                    cur_image = filename
                elif type(cur_image) == list:
                    if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                        cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
                    else:
                        cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
                else:
                    # promote to the [main, [subs]] composite form
                    cur_image = ['', [cur_image, filename]]

        """
        Rotate and angle
        """
        index = max(line.find(rotate), line.find(angle))
        if index > -1:
            # which is the image associated to it?
            filenames = intelligently_find_filenames(line,
                                                     commas_okay=commas_okay)
            # try the line after and the line before
            if line_index + 1 < len(lines):
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 1],
                    commas_okay=commas_okay))
            if line_index > 1:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index - 1],
                    commas_okay=commas_okay))

            already_tried = []
            for filename in filenames:
                if filename != 'ERROR' and filename not in already_tried:
                    if rotate_image(filename, line, sdir, image_list):
                        break
                    already_tried.append(filename)

        """
        INCLUDEGRAPHICS -
        structure of includegraphics:
        \includegraphics[someoptions]{FILENAME}
        """
        index = line.find(includegraphics_head)
        if index > -1:
            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(line_index, index, '{', lines)
            filename = lines[open_curly_line][open_curly + 1:close_curly]
            if cur_image == '':
                cur_image = filename
            elif type(cur_image) == list:
                if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                    cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
                else:
                    cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
            else:
                cur_image = ['', [cur_image, filename]]

        """
        {\input{FILENAME}}
        \caption{CAPTION}
        This input is ambiguous, since input is also used for things like
        inclusion of data from other LaTeX files directly.
        """
        index = line.find(input_head)
        if index > -1:
            new_tex_names = intelligently_find_filenames(
                line, TeX=True,
                commas_okay=commas_okay)
            for new_tex_name in new_tex_names:
                if new_tex_name != 'ERROR':
                    new_tex_file = get_tex_location(new_tex_name, tex_file)
                    if new_tex_file and primary:  # to kill recursion
                        extracted_image_data.extend(extract_captions(
                            new_tex_file, sdir,
                            image_list,
                            primary=False
                        ))

        """PICTURE"""
        index = line.find(picture_head)
        if index > -1:
            # structure of a picture:
            # \begin{picture}
            # ....not worrying about this now
            # print('found picture tag')
            # FIXME
            pass

        """DISPLAYMATH"""
        index = line.find(displaymath_head)
        if index > -1:
            # structure of a displaymath:
            # \begin{displaymath}
            # ....not worrying about this now
            # print('found displaymath tag')
            # FIXME
            pass

        """
        CAPTIONS -
        structure of a caption:
        \caption[someoptions]{CAPTION}
        or
        \caption{CAPTION}
        or
        \caption{{options}{CAPTION}}
        """
        index = max([line.find(caption_head), line.find(figcaption_head)])
        if index > -1:
            open_curly, open_curly_line, close_curly, close_curly_line = \
                find_open_and_close_braces(line_index, index, '{', lines)
            cap_begin = open_curly + 1
            cur_caption = assemble_caption(
                open_curly_line, cap_begin,
                close_curly_line, close_curly, lines)

            if caption == '':
                caption = cur_caption
            elif type(caption) == list:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
                else:
                    caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
            elif caption != cur_caption:
                # two different captions: promote to the composite form
                caption = ['', [caption, cur_caption]]

        """
        SUBFLOATS -
        structure of a subfloat (inside of a figure tag):
        \subfloat[CAPTION]{options{FILENAME}}
        also associated with the overall caption of the enclosing figure
        """
        index = line.find(subfloat_head)
        if index > -1:
            # if we are dealing with subfloats, we need a different
            # sort of structure to keep track of captions and subcaptions
            if not isinstance(cur_image, list):
                cur_image = [cur_image, []]
            if not isinstance(caption, list):
                caption = [caption, []]

            open_square, open_square_line, close_square, close_square_line = \
                find_open_and_close_braces(line_index, index, '[', lines)
            cap_begin = open_square + 1
            sub_caption = assemble_caption(
                open_square_line,
                cap_begin, close_square_line, close_square, lines)
            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)

            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(close_square_line,
                                           close_square, '{', lines)
            sub_image = lines[open_curly_line][open_curly + 1:close_curly]
            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)

        """
        SUBFIGURES -
        structure of a subfigure (inside a figure tag):
        \subfigure[CAPTION]{
        \includegraphics[options]{FILENAME}}
        also associated with the overall caption of the enclosing figure
        """
        index = line.find(subfig_head)
        if index > -1:
            # like with subfloats, we need a different structure for keepin
            # track of this stuff
            if type(cur_image) != list:
                cur_image = [cur_image, []]
            if type(caption) != list:
                caption = [caption, []]

            open_square, open_square_line, close_square, close_square_line = \
                find_open_and_close_braces(line_index, index, '[', lines)
            cap_begin = open_square + 1
            sub_caption = assemble_caption(open_square_line,
                                           cap_begin, close_square_line,
                                           close_square, lines)
            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)

            index_cpy = index
            # find the graphics tag to get the filename
            # it is okay if we eat lines here
            index = line.find(includegraphics_head)
            while index == -1 and (line_index + 1) < len(lines):
                line_index += 1
                line = lines[line_index]
                index = line.find(includegraphics_head)
            # NOTE(review): the loop above exits with line_index at most
            # len(lines) - 1, so this condition can never be true (off by
            # one); when no \includegraphics is found, index is still -1
            # below -- confirm intended behavior.
            if line_index == len(lines):
                # didn't find the image name on line
                line_index = index_cpy

            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(line_index,
                                           index, '{', lines)
            sub_image = lines[open_curly_line][open_curly + 1:close_curly]
            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)

        """
        LABELS -
        structure of a label:
        \label{somelabelnamewhichprobablyincludesacolon}
        Labels are used to tag images and will later be used in ref tags
        to reference them. This is interesting because in effect the refs
        to a plot are additional caption for it.
        Notes: labels can be used for many more things than just plots.
        We'll have to experiment with how to best associate a label with an
        image.. if it's in the caption, it's easy. If it's in a figure, it's
        still okay... but the images that aren't in figure tags are numerous.
        """
        index = line.find(label_head)
        if index > -1 and in_figure_tag:
            open_curly, open_curly_line, close_curly, dummy =\
                find_open_and_close_braces(line_index,
                                           index, '{', lines)
            label = lines[open_curly_line][open_curly + 1:close_curly]
            if label not in labels:
                active_label = label
                labels.append(label)

        """
        FIGURE
        important: we put the check for the end of the figure at the end
        of the loop in case some pathological person puts everything in one
        line
        """
        index = max([
            line.find(figure_tail),
            line.find(figure_wrap_tail),
            line.find(doc_tail)
        ])
        if index > -1:
            in_figure_tag = 0
            cur_image, caption, extracted_image_data = \
                put_it_together(cur_image, caption, active_label,
                                extracted_image_data,
                                line_index, lines)

        """
        END DOCUMENT
        we shouldn't look at anything after the end document tag is found
        """
        index = line.find(doc_tail)
        if index > -1:
            break

    return extracted_image_data
def put_it_together(cur_image, caption, context, extracted_image_data,
                    line_index, lines):
    """Put it together.

    Takes the current image(s) and caption(s) and assembles them into
    something useful in the extracted_image_data list.

    :param: cur_image (string || list): the image currently being dealt
        with, or a [main, [subs...]] list in the case of subimages
    :param: caption (string || list): the caption or captions currently in
        scope, same shape convention as cur_image
    :param: context (string): the label/context attached to every entry
    :param: extracted_image_data ([(string, string, string), ...]):
        a list of (image, caption, context) tuples matched so far; appended
        to in place
    :param: line_index (int): the index where we are in the lines (for
        searchback and searchforward purposes)
    :param: lines ([string, string, ...]): the lines in the TeX

    :return: (cur_image, caption, extracted_image_data): cur_image and
        caption reset to '' and the updated data list
    """
    # Scrub 'ERROR' placeholders left behind by the filename detection.
    if isinstance(cur_image, list):
        if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':
            cur_image[MAIN_CAPTION_OR_IMAGE] = ''
        if isinstance(cur_image[SUB_CAPTION_OR_IMAGE], list):
            # rebuild instead of removing while iterating: the original
            # loop skipped the element following each removal
            cur_image[SUB_CAPTION_OR_IMAGE] = [
                image for image in cur_image[SUB_CAPTION_OR_IMAGE]
                if image != 'ERROR']

    if cur_image != '' and caption != '':
        if isinstance(cur_image, list) and isinstance(caption, list):
            # composite image matched with composite caption
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and \
                    caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE],
                     caption[MAIN_CAPTION_OR_IMAGE],
                     context))
            if isinstance(cur_image[MAIN_CAPTION_OR_IMAGE], list):
                # the main caption may be reused below, but the main image
                # can only be used once
                cur_image[MAIN_CAPTION_OR_IMAGE] = ''
            if isinstance(cur_image[SUB_CAPTION_OR_IMAGE], list):
                if isinstance(caption[SUB_CAPTION_OR_IMAGE], list):
                    # pair each sub-image with its sub-caption, prefixed
                    # with the main caption
                    for index in \
                            range(len(cur_image[SUB_CAPTION_OR_IMAGE])):
                        if index < len(caption[SUB_CAPTION_OR_IMAGE]):
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                caption[SUB_CAPTION_OR_IMAGE][index]
                        else:
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                'Caption not extracted'
                        extracted_image_data.append(
                            (cur_image[SUB_CAPTION_OR_IMAGE][index],
                             long_caption, context))
                else:
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \
                        ' : ' + caption[SUB_CAPTION_OR_IMAGE]
                    for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_image, long_caption, context))
            else:
                if isinstance(caption[SUB_CAPTION_OR_IMAGE], list):
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                    for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:
                        long_caption = long_caption + ' : ' + sub_cap
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE], long_caption,
                         context))
                else:
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE],
                         caption[SUB_CAPTION_OR_IMAGE], context))

        elif isinstance(cur_image, list):
            # composite image, single caption: reuse the caption for every
            # image
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
            if isinstance(cur_image[SUB_CAPTION_OR_IMAGE], list):
                for image in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append((image, caption, context))
            else:
                extracted_image_data.append(
                    (cur_image[SUB_CAPTION_OR_IMAGE], caption, context))

        elif isinstance(caption, list):
            # single image, composite caption
            if caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))
            if isinstance(caption[SUB_CAPTION_OR_IMAGE], list):
                # multiple caps for one image: join them
                long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                    if long_caption != '':
                        long_caption += ' : '
                    long_caption += subcap
                extracted_image_data.append(
                    (cur_image, long_caption, context))
            else:
                # bug fix: the original read
                # ``caption[SUB_CAPTION_OR_IMAGE]. context`` -- attribute
                # access instead of a tuple element -- which raised
                # AttributeError whenever this branch was reached
                extracted_image_data.append(
                    (cur_image, caption[SUB_CAPTION_OR_IMAGE], context))
        else:
            extracted_image_data.append((cur_image, caption, context))

    elif cur_image != '' and caption == '':
        # we may have missed the caption somewhere; look nearby for an
        # unattached {...} group and use its contents
        REASONABLE_SEARCHBACK = 25
        REASONABLE_SEARCHFORWARD = 5
        curly_no_tag_preceding = '(?<!\\w){'
        for searchback in range(REASONABLE_SEARCHBACK):
            if line_index - searchback < 0:
                continue
            back_line = lines[line_index - searchback]
            m = re.search(curly_no_tag_preceding, back_line)
            if m:
                open_curly = m.start()
                open_curly, open_curly_line, close_curly, \
                    close_curly_line = find_open_and_close_braces(
                        line_index - searchback, open_curly, '{', lines)
                cap_begin = open_curly + 1
                caption = assemble_caption(open_curly_line, cap_begin,
                                           close_curly_line, close_curly,
                                           lines)
                if isinstance(cur_image, list):
                    extracted_image_data.append(
                        (cur_image[MAIN_CAPTION_OR_IMAGE], caption,
                         context))
                    for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_img, caption, context))
                else:
                    extracted_image_data.append(
                        (cur_image, caption, context))
                break

        if caption == '':
            for searchforward in range(REASONABLE_SEARCHFORWARD):
                if line_index + searchforward >= len(lines):
                    break
                fwd_line = lines[line_index + searchforward]
                m = re.search(curly_no_tag_preceding, fwd_line)
                if m:
                    open_curly = m.start()
                    open_curly, open_curly_line, close_curly, \
                        close_curly_line = find_open_and_close_braces(
                            line_index + searchforward, open_curly, '{',
                            lines)
                    cap_begin = open_curly + 1
                    caption = assemble_caption(open_curly_line,
                                               cap_begin, close_curly_line,
                                               close_curly, lines)
                    if isinstance(cur_image, list):
                        extracted_image_data.append(
                            (cur_image[MAIN_CAPTION_OR_IMAGE],
                             caption, context))
                        for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                            extracted_image_data.append(
                                (sub_img, caption, context))
                    else:
                        extracted_image_data.append(
                            (cur_image, caption, context))
                    break

        if caption == '':
            # record the image(s) anyway so they are not lost
            if isinstance(cur_image, list):
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found',
                     context))
                for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append(
                        (sub_img, 'No caption', context))
            else:
                extracted_image_data.append(
                    (cur_image, 'No caption found', context))

    elif caption != '' and cur_image == '':
        if isinstance(caption, list):
            long_caption = caption[MAIN_CAPTION_OR_IMAGE]
            for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                long_caption = long_caption + ': ' + subcap
        else:
            long_caption = caption
        # keep caption-only entries, flagged with the 'noimg' prefix
        extracted_image_data.append(('', 'noimg' + long_caption, context))

    # if we're leaving the figure, no sense keeping the data
    cur_image = ''
    caption = ''
    return cur_image, caption, extracted_image_data
def intelligently_find_filenames(line, TeX=False, ext=False,
                                 commas_okay=False):
    """Intelligently find filenames.

    Find the filename in the line. We don't support all filenames! Just eps
    and ps for now.

    :param: line (string): the line we want to get a filename out of
    :param: TeX (bool): also accept TeX-source style suffixes
    :param: ext (bool): require an .eps/.ps-style extension
    :param: commas_okay (bool): allow commas inside filenames

    :return: filename ([string, ...]): what is probably the name of the
        file(s); ['ERROR'] when nothing matched
    """
    files_included = ['ERROR']  # dummy entry, dropped once real matches exist

    if commas_okay:
        valid_for_filename = r'\s*[A-Za-z0-9\-\=\+/\\_\.,%#]+'
    else:
        valid_for_filename = r'\s*[A-Za-z0-9\-\=\+/\\_\.%#]+'

    if ext:
        valid_for_filename += r'\.e*ps[texfi2]*'
    if TeX:
        valid_for_filename += r'[\.latex]*'

    # looks like '=FILENAME,' or '=FILENAME '
    for file_included in re.findall('=' + valid_for_filename + '[ ,]', line):
        files_included.append(file_included[1:-1])

    # looks like 'file=FILENAME', 'psfile=FILENAME' or 'figure=FILENAME'
    for file_included in re.findall('(?:[ps]*file=|figure=)' +
                                    valid_for_filename + '[,\\]} ]*', line):
        part_before_equals = file_included.split('=')[0]
        # bug fix: the original compared an int to a string here
        # (``len(...) != file_included``), which was always true; compare
        # the lengths as clearly intended
        if len(part_before_equals) != len(file_included):
            file_included = file_included[
                len(part_before_equals) + 1:].strip()
        if file_included not in files_included:
            files_included.append(file_included)

    # looks like '{FILENAME}', '[FILENAME]', '"FILENAME"' or "'FILENAME'"
    for file_included in re.findall(
            '["\'{\\[]' + valid_for_filename + '[}\\],"\']', line):
        file_included = file_included[1:-1].strip()
        if file_included not in files_included:
            files_included.append(file_included)

    # the whole line is a filename
    for file_included in re.findall('^' + valid_for_filename + '$', line):
        file_included = file_included.strip()
        if file_included not in files_included:
            files_included.append(file_included)

    # a filename at the start of the line, followed by a delimiter
    for file_included in re.findall(
            '^' + valid_for_filename + '[,\\} $]', line):
        file_included = file_included.strip()
        if file_included not in files_included:
            files_included.append(file_included)

    # a filename at the end of the line
    for file_included in re.findall(
            '\\s*' + valid_for_filename + '\\s*$', line):
        file_included = file_included.strip()
        if file_included not in files_included:
            files_included.append(file_included)

    if files_included != ['ERROR']:
        files_included = files_included[1:]  # cut off the dummy

        # iterate over a snapshot: the original mutated files_included
        # while iterating it, which skips the entry after each removal
        for file_included in list(files_included):
            if file_included == '':
                files_included.remove(file_included)
                continue
            if ' ' in file_included:
                for subfile in file_included.split(' '):
                    if subfile not in files_included:
                        files_included.append(subfile)
            if ',' in file_included:
                # bug fix: the original split on ' ' here, so
                # comma-separated names were never actually broken apart
                for subfile in file_included.split(','):
                    if subfile not in files_included:
                        files_included.append(subfile)
    return files_included
def get_lines_from_file(filepath, encoding="UTF-8"):
    """Return the list of lines of ``filepath``.

    The file is first decoded with *encoding* (UTF-8 by default); if that
    fails, it is re-read as ISO-8859-1, which accepts any byte sequence.

    :param: filepath (string): path of the file to read
    :param: encoding (string): codec tried first

    :return: lines ([string, ...]): decoded lines of the file
    """
    # ``with`` guarantees each handle is closed even when decoding fails.
    # The original leaked the first handle on the fallback path, and its
    # ``finally: fd.close()`` raised NameError (unbound ``fd``) whenever
    # ``codecs.open`` itself failed, masking the real error.
    try:
        with codecs.open(filepath, 'r', encoding) as fd:
            return fd.readlines()
    except UnicodeDecodeError:
        # Fall back to 'ISO-8859-1'
        with codecs.open(filepath, 'r', 'ISO-8859-1') as fd:
            return fd.readlines()
|
inspirehep/plotextractor | plotextractor/extractor.py | extract_captions | python | def extract_captions(tex_file, sdir, image_list, primary=True):
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
lines = get_lines_from_file(tex_file)
# possible figure lead-ins
figure_head = u'\\begin{figure' # also matches figure*
figure_wrap_head = u'\\begin{wrapfigure'
figure_tail = u'\\end{figure' # also matches figure*
figure_wrap_tail = u'\\end{wrapfigure'
picture_head = u'\\begin{picture}'
displaymath_head = u'\\begin{displaymath}'
subfloat_head = u'\\subfloat'
subfig_head = u'\\subfigure'
includegraphics_head = u'\\includegraphics'
epsfig_head = u'\\epsfig'
input_head = u'\\input'
# possible caption lead-ins
caption_head = u'\\caption'
figcaption_head = u'\\figcaption'
label_head = u'\\label'
rotate = u'rotate='
angle = u'angle='
eps_tail = u'.eps'
ps_tail = u'.ps'
doc_head = u'\\begin{document}'
doc_tail = u'\\end{document}'
extracted_image_data = []
cur_image = ''
caption = ''
labels = []
active_label = ""
# cut out shit before the doc head
if primary:
for line_index in range(len(lines)):
if lines[line_index].find(doc_head) < 0:
lines[line_index] = ''
else:
break
# are we using commas in filenames here?
commas_okay = False
for dummy1, dummy2, filenames in \
os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
for filename in filenames:
if filename.find(',') > -1:
commas_okay = True
break
# a comment is a % not preceded by a \
comment = re.compile("(?<!\\\\)%")
for line_index in range(len(lines)):
# get rid of pesky comments by splitting where the comment is
# and keeping only the part before the %
line = comment.split(lines[line_index])[0]
line = line.strip()
lines[line_index] = line
in_figure_tag = 0
for line_index in range(len(lines)):
line = lines[line_index]
if line == '':
continue
if line.find(doc_tail) > -1:
break
"""
FIGURE -
structure of a figure:
\begin{figure}
\formatting...
\includegraphics[someoptions]{FILENAME}
\caption{CAPTION} %caption and includegraphics may be switched!
\end{figure}
"""
index = max([line.find(figure_head), line.find(figure_wrap_head)])
if index > -1:
in_figure_tag = 1
# some punks don't like to put things in the figure tag. so we
# just want to see if there is anything that is sitting outside
# of it when we find it
cur_image, caption, extracted_image_data = put_it_together(
cur_image, caption,
active_label, extracted_image_data,
line_index, lines)
# here, you jerks, just make it so that it's fecking impossible to
# figure out your damn inclusion types
index = max([line.find(eps_tail), line.find(ps_tail),
line.find(epsfig_head)])
if index > -1:
if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
ext = True
else:
ext = False
filenames = intelligently_find_filenames(line, ext=ext,
commas_okay=commas_okay)
# try to look ahead! sometimes there are better matches after
if line_index < len(lines) - 1:
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index < len(lines) - 2:
filenames.extend(intelligently_find_filenames(
lines[line_index + 2],
commas_okay=commas_okay))
for filename in filenames:
filename = filename.encode('utf-8', 'ignore')
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
Rotate and angle
"""
index = max(line.find(rotate), line.find(angle))
if index > -1:
# which is the image associated to it?
filenames = intelligently_find_filenames(line,
commas_okay=commas_okay)
# try the line after and the line before
if line_index + 1 < len(lines):
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index > 1:
filenames.extend(intelligently_find_filenames(
lines[line_index - 1],
commas_okay=commas_okay))
already_tried = []
for filename in filenames:
if filename != 'ERROR' and filename not in already_tried:
if rotate_image(filename, line, sdir, image_list):
break
already_tried.append(filename)
"""
INCLUDEGRAPHICS -
structure of includegraphics:
\includegraphics[someoptions]{FILENAME}
"""
index = line.find(includegraphics_head)
if index > -1:
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index, index, '{', lines)
filename = lines[open_curly_line][open_curly + 1:close_curly]
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
{\input{FILENAME}}
\caption{CAPTION}
This input is ambiguous, since input is also used for things like
inclusion of data from other LaTeX files directly.
"""
index = line.find(input_head)
if index > -1:
new_tex_names = intelligently_find_filenames(
line, TeX=True,
commas_okay=commas_okay)
for new_tex_name in new_tex_names:
if new_tex_name != 'ERROR':
new_tex_file = get_tex_location(new_tex_name, tex_file)
if new_tex_file and primary: # to kill recursion
extracted_image_data.extend(extract_captions(
new_tex_file, sdir,
image_list,
primary=False
))
"""PICTURE"""
index = line.find(picture_head)
if index > -1:
# structure of a picture:
# \begin{picture}
# ....not worrying about this now
# print('found picture tag')
# FIXME
pass
"""DISPLAYMATH"""
index = line.find(displaymath_head)
if index > -1:
# structure of a displaymath:
# \begin{displaymath}
# ....not worrying about this now
# print('found displaymath tag')
# FIXME
pass
"""
CAPTIONS -
structure of a caption:
\caption[someoptions]{CAPTION}
or
\caption{CAPTION}
or
\caption{{options}{CAPTION}}
"""
index = max([line.find(caption_head), line.find(figcaption_head)])
if index > -1:
open_curly, open_curly_line, close_curly, close_curly_line = \
find_open_and_close_braces(line_index, index, '{', lines)
cap_begin = open_curly + 1
cur_caption = assemble_caption(
open_curly_line, cap_begin,
close_curly_line, close_curly, lines)
if caption == '':
caption = cur_caption
elif type(caption) == list:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
else:
caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
elif caption != cur_caption:
caption = ['', [caption, cur_caption]]
"""
SUBFLOATS -
structure of a subfloat (inside of a figure tag):
\subfloat[CAPTION]{options{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfloat_head)
if index > -1:
# if we are dealing with subfloats, we need a different
# sort of structure to keep track of captions and subcaptions
if not isinstance(cur_image, list):
cur_image = [cur_image, []]
if not isinstance(caption, list):
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(
open_square_line,
cap_begin, close_square_line, close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(close_square_line,
close_square, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
SUBFIGURES -
structure of a subfigure (inside a figure tag):
\subfigure[CAPTION]{
\includegraphics[options]{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfig_head)
if index > -1:
# like with subfloats, we need a different structure for keepin
# track of this stuff
if type(cur_image) != list:
cur_image = [cur_image, []]
if type(caption) != list:
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(open_square_line,
cap_begin, close_square_line,
close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
index_cpy = index
# find the graphics tag to get the filename
# it is okay if we eat lines here
index = line.find(includegraphics_head)
while index == -1 and (line_index + 1) < len(lines):
line_index += 1
line = lines[line_index]
index = line.find(includegraphics_head)
if line_index == len(lines):
# didn't find the image name on line
line_index = index_cpy
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index,
index, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
LABELS -
structure of a label:
\label{somelabelnamewhichprobablyincludesacolon}
Labels are used to tag images and will later be used in ref tags
to reference them. This is interesting because in effect the refs
to a plot are additional caption for it.
Notes: labels can be used for many more things than just plots.
We'll have to experiment with how to best associate a label with an
image.. if it's in the caption, it's easy. If it's in a figure, it's
still okay... but the images that aren't in figure tags are numerous.
"""
index = line.find(label_head)
if index > -1 and in_figure_tag:
open_curly, open_curly_line, close_curly, dummy =\
find_open_and_close_braces(line_index,
index, '{', lines)
label = lines[open_curly_line][open_curly + 1:close_curly]
if label not in labels:
active_label = label
labels.append(label)
"""
FIGURE
important: we put the check for the end of the figure at the end
of the loop in case some pathological person puts everything in one
line
"""
index = max([
line.find(figure_tail),
line.find(figure_wrap_tail),
line.find(doc_tail)
])
if index > -1:
in_figure_tag = 0
cur_image, caption, extracted_image_data = \
put_it_together(cur_image, caption, active_label,
extracted_image_data,
line_index, lines)
"""
END DOCUMENT
we shouldn't look at anything after the end document tag is found
"""
index = line.find(doc_tail)
if index > -1:
break
return extracted_image_data | Extract captions.
Take the TeX file and the list of images in the tarball (which all,
presumably, are used in the TeX file) and figure out which captions
in the text are associated with which images
:param: lines (list): list of lines of the TeX file
:param: tex_file (string): the name of the TeX file which mentions
the images
:param: sdir (string): path to current sub-directory
:param: image_list (list): list of images in tarball
:param: primary (bool): is this the primary call to extract_caption?
:return: images_and_captions_and_labels ([(string, string, list),
(string, string, list), ...]):
a list of tuples representing the names of images and their
corresponding figure labels from the TeX file | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/extractor.py#L168-L567 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Plot extractor extractor."""
from __future__ import absolute_import, print_function
import codecs
import os
import re
from .config import (
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT,
CFG_PLOTEXTRACTOR_DISALLOWED_TEX,
)
from .output_utils import (
assemble_caption,
find_open_and_close_braces,
get_tex_location,
)
from .converter import rotate_image
# Prefix of arXiv identifiers (e.g. "arXiv:1234.5678").
ARXIV_HEADER = 'arXiv:'
# Name of the sub-directory where extracted plots are collected.
PLOTS_DIR = 'plots'
# Index convention for the (main, subs) pairs used throughout this module:
# element 0 is the main image/caption, element 1 is the list of
# sub-images/sub-captions.
MAIN_CAPTION_OR_IMAGE = 0
SUB_CAPTION_OR_IMAGE = 1
def get_context(lines, backwards=False):
    """Collect contextually relevant text from a TeX snippet.

    Starting from one end of *lines*, words are accumulated until either
    the configured word limit is reached or a disallowed TeX construct
    (``\\begin``, ``\\end``, ...) is encountered, at which point collection
    stops.  The accumulated text is then trimmed to the configured number
    of sentences.

    Used to gather text with contextual value around a figure or plot.

    :param lines (string): snippet to examine
    :param backwards (bool): when True, scan from the end of the snippet
        toward its beginning
    :return (string): the extracted context
    """
    tag_pattern = re.compile(r".*\\(\w+).*")
    sentence_boundary = re.compile(r"(?<=[.?!])[\s]+(?=[A-Z])")

    words = lines.split()
    if backwards:
        words.reverse()

    collected = []
    for token in words:
        # Stop once enough words have been gathered.
        if len(collected) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:
            break
        hit = tag_pattern.match(token)
        if hit and hit.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX:
            # Disallowed TeX construct reached: end of useful context.
            if backwards:
                # Scanning in reverse we may have collected the tail of a
                # bracketed argument; discard words back to the '}' that
                # closed it.
                while collected:
                    if '}' in collected.pop():
                        break
            break
        collected.append(token)

    if backwards:
        collected.reverse()

    sentences = sentence_boundary.split(" ".join(collected))
    if backwards:
        sentences.reverse()

    # Slicing past the end is harmless, so one join covers both the
    # truncated and the short case.
    return " ".join(sentences[:CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT])
def extract_context(tex_file, extracted_image_data):
    """Extract context.

    Given a .tex file and image/label records, extract the text before and
    after each reference made to each label in the text.  The number of
    characters to extract before and after is configurable.

    :param tex_file (string): path to the .tex file
    :param extracted_image_data ([dict, ...]): records with at least a
        'label' key; each record has a 'contexts' list added in place

    :return extracted_image_data: the same list, now containing the
        extracted contexts, or [] when *tex_file* is missing or a directory
    """
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        return []

    lines = "".join(get_lines_from_file(tex_file))

    # Generate context for each image and its associated labels.
    for data in extracted_image_data:
        context_list = []

        # Spans of every \fig{label} / \ref{label} occurrence in the text.
        spans = [match.span()
                 for match in re.finditer(r"(\\(?:fig|ref)\{%s\})" %
                                          (re.escape(data['label']),),
                                          lines)]
        for startindex, endindex in spans:
            # Up to the configured number of characters before the
            # reference, condensed into a context string.
            start = max(
                startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT, 0)
            context_before = get_context(lines[start:startindex],
                                         backwards=True)

            # Same for the text following the reference.
            end = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            context_after = get_context(lines[endindex:end])

            context_list.append(
                context_before + ' \\ref{' + data['label'] + '} ' +
                context_after
            )
        data['contexts'] = context_list

    # Return the (mutated) list for consistency with the early `return []`
    # above and with this function's documented contract.
    return extracted_image_data
def put_it_together(cur_image, caption, context, extracted_image_data,
                    line_index, lines):
    """Pair the current image(s) with the caption(s) in scope.

    Appends ``(image, caption, context)`` tuples to
    ``extracted_image_data`` (mutated in place), handling every
    combination of single/multiple images and single/multiple captions.
    When an image has no caption, nearby lines are searched for one.

    :param: cur_image (string || list): the image currently being dealt
        with, or [main_image, [sub_image, ...]] in the case of subimages
    :param: caption (string || list): the caption currently in scope, or
        [main_caption, [sub_caption, ...]]
    :param: context: contextual data attached to every appended tuple
    :param: extracted_image_data ([(string, string, context), ...]): list
        of tuples of images matched to captions from this document
    :param: line_index (int): index of the current line in ``lines`` (for
        search-back and search-forward purposes)
    :param: lines ([string, string, ...]): the lines of the TeX file

    :return: (cur_image, caption, extracted_image_data): cur_image and
        caption are reset to '' since we are leaving the figure
    """
    if type(cur_image) == list:
        # Drop 'ERROR' placeholders produced by failed filename extraction.
        if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':
            cur_image[MAIN_CAPTION_OR_IMAGE] = ''
        # Rebuild rather than remove() while iterating, which would skip
        # the element following each removed one.
        cur_image[SUB_CAPTION_OR_IMAGE] = [
            image for image in cur_image[SUB_CAPTION_OR_IMAGE]
            if image != 'ERROR']

    if cur_image != '' and caption != '':
        if type(cur_image) == list and type(caption) == list:
            # Both are (main, [subs]) pairs: pair mains with mains and
            # subs with subs, index by index.
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\
                    caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE],
                     caption[MAIN_CAPTION_OR_IMAGE],
                     context))
            if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list:
                # The main caption may be attached to several things, but
                # the main image can only be used once.
                cur_image[MAIN_CAPTION_OR_IMAGE] = ''
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    for index in \
                            range(len(cur_image[SUB_CAPTION_OR_IMAGE])):
                        if index < len(caption[SUB_CAPTION_OR_IMAGE]):
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                caption[SUB_CAPTION_OR_IMAGE][index]
                        else:
                            # More sub-images than sub-captions.
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                'Caption not extracted'
                        extracted_image_data.append(
                            (cur_image[SUB_CAPTION_OR_IMAGE][index],
                             long_caption, context))
                else:
                    # One sub-caption shared by all sub-images.
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \
                        ' : ' + caption[SUB_CAPTION_OR_IMAGE]
                    for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_image, long_caption, context))
            else:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    # One sub-image, several sub-captions: concatenate.
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                    for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:
                        long_caption = long_caption + ' : ' + sub_cap
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE], long_caption,
                         context))
                else:
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE],
                         caption[SUB_CAPTION_OR_IMAGE], context))

        elif type(cur_image) == list:
            # Several images, one caption: attach it to all of them.
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                for image in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append((image, caption, context))
            else:
                extracted_image_data.append(
                    (cur_image[SUB_CAPTION_OR_IMAGE], caption, context))

        elif type(caption) == list:
            # One image, several captions.
            if caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))
            if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                # Multiple captions for one image: concatenate them.
                long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                    if long_caption != '':
                        long_caption += ' : '
                    long_caption += subcap
                extracted_image_data.append((cur_image, long_caption, context))
            else:
                # FIX: the original read `caption[SUB_CAPTION_OR_IMAGE]. context`
                # (attribute access), raising AttributeError at runtime; the
                # intent was a three-element tuple.
                extracted_image_data.append(
                    (cur_image, caption[SUB_CAPTION_OR_IMAGE], context))

        else:
            extracted_image_data.append((cur_image, caption, context))

    elif cur_image != '' and caption == '':
        # We may have missed the caption somewhere: scan nearby lines for
        # the first '{' not preceded by a word character and treat its
        # brace-delimited content as the caption.
        REASONABLE_SEARCHBACK = 25
        REASONABLE_SEARCHFORWARD = 5
        curly_no_tag_preceding = '(?<!\\w){'
        for searchback in range(REASONABLE_SEARCHBACK):
            if line_index - searchback < 0:
                continue
            back_line = lines[line_index - searchback]
            m = re.search(curly_no_tag_preceding, back_line)
            if m:
                open_curly = m.start()
                open_curly, open_curly_line, close_curly, \
                    close_curly_line = find_open_and_close_braces(
                        line_index - searchback, open_curly, '{', lines)
                cap_begin = open_curly + 1
                caption = assemble_caption(open_curly_line, cap_begin,
                                           close_curly_line, close_curly,
                                           lines)
                if type(cur_image) == list:
                    extracted_image_data.append(
                        (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
                    for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_img, caption, context))
                else:
                    extracted_image_data.append((cur_image, caption, context))
                break

        if caption == '':
            for searchforward in range(REASONABLE_SEARCHFORWARD):
                if line_index + searchforward >= len(lines):
                    break
                fwd_line = lines[line_index + searchforward]
                m = re.search(curly_no_tag_preceding, fwd_line)
                if m:
                    open_curly = m.start()
                    open_curly, open_curly_line, close_curly,\
                        close_curly_line = find_open_and_close_braces(
                            line_index + searchforward, open_curly, '{', lines)
                    cap_begin = open_curly + 1
                    caption = assemble_caption(open_curly_line,
                                               cap_begin, close_curly_line,
                                               close_curly, lines)
                    if type(cur_image) == list:
                        extracted_image_data.append(
                            (cur_image[MAIN_CAPTION_OR_IMAGE],
                             caption, context))
                        for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                            extracted_image_data.append(
                                (sub_img, caption, context))
                    else:
                        extracted_image_data.append(
                            (cur_image, caption, context))
                    break

        if caption == '':
            # Nothing found in either direction; record a placeholder.
            if type(cur_image) == list:
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found',
                     context))
                for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append(
                        (sub_img, 'No caption', context))
            else:
                extracted_image_data.append(
                    (cur_image, 'No caption found', context))

    elif caption != '' and cur_image == '':
        # Caption with no image: keep it, flagged with a 'noimg' prefix.
        if type(caption) == list:
            long_caption = caption[MAIN_CAPTION_OR_IMAGE]
            for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                long_caption = long_caption + ': ' + subcap
        else:
            long_caption = caption
        extracted_image_data.append(('', 'noimg' + long_caption, context))

    # If we're leaving the figure, no sense keeping the data.
    cur_image = ''
    caption = ''

    return cur_image, caption, extracted_image_data
def intelligently_find_filenames(line, TeX=False, ext=False,
                                 commas_okay=False):
    """Intelligently find filenames.

    Find the filename(s) in the line.  We don't support all filenames!
    Just eps and ps for now.

    :param: line (string): the line we want to get a filename out of
    :param: TeX (bool): also accept a .latex-style suffix
    :param: ext (bool): require a .eps/.ps style extension
    :param: commas_okay (bool): allow commas inside a filename

    :return: filename ([string, ...]): what is probably the name of the
        file(s), or ['ERROR'] when nothing matched
    """
    files_included = ['ERROR']

    if commas_okay:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.,%#]+'
    else:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.%#]+'

    if ext:
        valid_for_filename += '\.e*ps[texfi2]*'

    if TeX:
        valid_for_filename += '[\.latex]*'

    file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)

    if len(file_inclusion) > 0:
        # right now it looks like '=FILENAME,' or '=FILENAME '
        for file_included in file_inclusion:
            files_included.append(file_included[1:-1])

    file_inclusion = re.findall('(?:[ps]*file=|figure=)' +
                                valid_for_filename + '[,\\]} ]*', line)

    if len(file_inclusion) > 0:
        # still has the =
        for file_included in file_inclusion:
            part_before_equals = file_included.split('=')[0]
            # FIX: the original compared an int (len) to a string, which
            # is always True; compare the lengths as intended.
            if len(part_before_equals) != len(file_included):
                file_included = file_included[
                    len(part_before_equals) + 1:].strip()
            if file_included not in files_included:
                files_included.append(file_included)

    file_inclusion = re.findall(
        '["\'{\\[]' + valid_for_filename + '[}\\],"\']',
        line)

    if len(file_inclusion) > 0:
        # right now it's got the {} or [] or "" or '' around it still
        for file_included in file_inclusion:
            file_included = file_included[1:-1]
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    file_inclusion = re.findall('^' + valid_for_filename + '$', line)

    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    file_inclusion = re.findall('^' + valid_for_filename + '[,\\} $]', line)

    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    file_inclusion = re.findall('\\s*' + valid_for_filename + '\\s*$', line)

    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    if files_included != ['ERROR']:
        files_included = files_included[1:]  # cut off the dummy

        # Post-process with an index so that sub-filenames appended below
        # are themselves examined, without the skipped-element bug of
        # remove()-while-iterating.
        index = 0
        while index < len(files_included):
            file_included = files_included[index]
            if file_included == '':
                files_included.pop(index)
                continue
            if ' ' in file_included:
                for subfile in file_included.split(' '):
                    if subfile not in files_included:
                        files_included.append(subfile)
            if ',' in file_included:
                # FIX: the original split on ' ' here too, so
                # comma-separated filenames were never broken apart.
                for subfile in file_included.split(','):
                    if subfile not in files_included:
                        files_included.append(subfile)
            index += 1

    return files_included
def get_lines_from_file(filepath, encoding="UTF-8"):
    """Return the list of lines of *filepath*.

    The file is first decoded with *encoding*; if that raises
    UnicodeDecodeError the read is retried with ISO-8859-1, which accepts
    any byte sequence.

    :param filepath (string): path of the file to read
    :param encoding (string): encoding tried first (default UTF-8)
    :return: list of lines, newline characters preserved
    """
    # `with` guarantees the handle is closed on every path.  The original
    # `finally: fd.close()` raised NameError when open() itself failed,
    # and leaked the first handle when falling back to ISO-8859-1.
    try:
        with codecs.open(filepath, 'r', encoding) as fd:
            return fd.readlines()
    except UnicodeDecodeError:
        # Fall back to 'ISO-8859-1'
        with codecs.open(filepath, 'r', 'ISO-8859-1') as fd:
            return fd.readlines()
|
inspirehep/plotextractor | plotextractor/extractor.py | put_it_together | python | def put_it_together(cur_image, caption, context, extracted_image_data,
line_index, lines):
if type(cur_image) == list:
if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':
cur_image[MAIN_CAPTION_OR_IMAGE] = ''
for image in cur_image[SUB_CAPTION_OR_IMAGE]:
if image == 'ERROR':
cur_image[SUB_CAPTION_OR_IMAGE].remove(image)
if cur_image != '' and caption != '':
if type(cur_image) == list and type(caption) == list:
if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\
caption[MAIN_CAPTION_OR_IMAGE] != '':
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE],
caption[MAIN_CAPTION_OR_IMAGE],
context))
if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list:
# why is the main image a list?
# it's a good idea to attach the main caption to other
# things, but the main image can only be used once
cur_image[MAIN_CAPTION_OR_IMAGE] = ''
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
for index in \
range(len(cur_image[SUB_CAPTION_OR_IMAGE])):
if index < len(caption[SUB_CAPTION_OR_IMAGE]):
long_caption = \
caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
caption[SUB_CAPTION_OR_IMAGE][index]
else:
long_caption = \
caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
'Caption not extracted'
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE][index],
long_caption, context))
else:
long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \
' : ' + caption[SUB_CAPTION_OR_IMAGE]
for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append(
(sub_image, long_caption, context))
else:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
long_caption = caption[MAIN_CAPTION_OR_IMAGE]
for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:
long_caption = long_caption + ' : ' + sub_cap
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE], long_caption,
context))
else:
# wtf are they lists for?
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE],
caption[SUB_CAPTION_OR_IMAGE], context))
elif type(cur_image) == list:
if cur_image[MAIN_CAPTION_OR_IMAGE] != '':
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
for image in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append((image, caption, context))
else:
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE], caption, context))
elif type(caption) == list:
if caption[MAIN_CAPTION_OR_IMAGE] != '':
extracted_image_data.append(
(cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
# multiple caps for one image:
long_caption = caption[MAIN_CAPTION_OR_IMAGE]
for subcap in caption[SUB_CAPTION_OR_IMAGE]:
if long_caption != '':
long_caption += ' : '
long_caption += subcap
extracted_image_data.append((cur_image, long_caption, context))
else:
extracted_image_data.append(
(cur_image, caption[SUB_CAPTION_OR_IMAGE]. context))
else:
extracted_image_data.append((cur_image, caption, context))
elif cur_image != '' and caption == '':
# we may have missed the caption somewhere.
REASONABLE_SEARCHBACK = 25
REASONABLE_SEARCHFORWARD = 5
curly_no_tag_preceding = '(?<!\\w){'
for searchback in range(REASONABLE_SEARCHBACK):
if line_index - searchback < 0:
continue
back_line = lines[line_index - searchback]
m = re.search(curly_no_tag_preceding, back_line)
if m:
open_curly = m.start()
open_curly, open_curly_line, close_curly, \
close_curly_line = find_open_and_close_braces(
line_index - searchback, open_curly, '{', lines)
cap_begin = open_curly + 1
caption = assemble_caption(open_curly_line, cap_begin,
close_curly_line, close_curly,
lines)
if type(cur_image) == list:
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append(
(sub_img, caption, context))
else:
extracted_image_data.append((cur_image, caption, context))
break
if caption == '':
for searchforward in range(REASONABLE_SEARCHFORWARD):
if line_index + searchforward >= len(lines):
break
fwd_line = lines[line_index + searchforward]
m = re.search(curly_no_tag_preceding, fwd_line)
if m:
open_curly = m.start()
open_curly, open_curly_line, close_curly,\
close_curly_line = find_open_and_close_braces(
line_index + searchforward, open_curly, '{', lines)
cap_begin = open_curly + 1
caption = assemble_caption(open_curly_line,
cap_begin, close_curly_line,
close_curly, lines)
if type(cur_image) == list:
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE],
caption, context))
for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append(
(sub_img, caption, context))
else:
extracted_image_data.append(
(cur_image, caption, context))
break
if caption == '':
if type(cur_image) == list:
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found',
context))
for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append(
(sub_img, 'No caption', context))
else:
extracted_image_data.append(
(cur_image, 'No caption found', context))
elif caption != '' and cur_image == '':
if type(caption) == list:
long_caption = caption[MAIN_CAPTION_OR_IMAGE]
for subcap in caption[SUB_CAPTION_OR_IMAGE]:
long_caption = long_caption + ': ' + subcap
else:
long_caption = caption
extracted_image_data.append(('', 'noimg' + long_caption, context))
# if we're leaving the figure, no sense keeping the data
cur_image = ''
caption = ''
return cur_image, caption, extracted_image_data | Put it together.
Takes the current image(s) and caption(s) and assembles them into
something useful in the extracted_image_data list.
:param: cur_image (string || list): the image currently being dealt with,
or the list of images, in the case of subimages
:param: caption (string || list): the caption or captions currently in
scope
:param: extracted_image_data ([(string, string), (string, string), ...]):
a list of tuples of images matched to captions from this document.
:param: line_index (int): the index where we are in the lines (for
searchback and searchforward purposes)
:param: lines ([string, string, ...]): the lines in the TeX
:return: (cur_image, caption, extracted_image_data): the same arguments it
was sent, processed appropriately | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/extractor.py#L570-L771 | [
"def assemble_caption(begin_line, begin_index, end_line, end_index, lines):\n \"\"\"\n Take the caption of a picture and put it all together\n in a nice way. If it spans multiple lines, put it on one line. If it\n contains controlled characters, strip them out. If it has tags we don't\n want to worry about, get rid of them, etc.\n\n :param: begin_line (int): the index of the line where the caption begins\n :param: begin_index (int): the index within the line where the caption\n begins\n :param: end_line (int): the index of the line where the caption ends\n :param: end_index (int): the index within the line where the caption ends\n :param: lines ([string, string, ...]): the line strings of the text\n\n :return: caption (string): the caption, formatted and pieced together\n \"\"\"\n\n # stuff we don't like\n label_head = '\\\\label{'\n\n # reassemble that sucker\n if end_line > begin_line:\n # our caption spanned multiple lines\n caption = lines[begin_line][begin_index:]\n\n for included_line_index in range(begin_line + 1, end_line):\n caption = caption + ' ' + lines[included_line_index]\n\n caption = caption + ' ' + lines[end_line][:end_index]\n caption = caption.replace('\\n', ' ')\n caption = caption.replace(' ', ' ')\n else:\n # it fit on one line\n caption = lines[begin_line][begin_index:end_index]\n\n # clean out a label tag, if there is one\n label_begin = caption.find(label_head)\n if label_begin > -1:\n # we know that our caption is only one line, so if there's a label\n # tag in it, it will be all on one line. so we make up some args\n dummy_start, dummy_start_line, label_end, dummy_end = \\\n find_open_and_close_braces(0, label_begin, '{', [caption])\n caption = caption[:label_begin] + caption[label_end + 1:]\n\n caption = caption.strip()\n\n if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':\n caption = caption[1:-1]\n\n return caption\n",
"def find_open_and_close_braces(line_index, start, brace, lines):\n \"\"\"\n Take the line where we want to start and the index where we want to start\n and find the first instance of matched open and close braces of the same\n type as brace in file file.\n\n :param: line (int): the index of the line we want to start searching at\n :param: start (int): the index in the line we want to start searching at\n :param: brace (string): one of the type of brace we are looking for ({, },\n [, or ])\n :param lines ([string, string, ...]): the array of lines in the file we\n are looking in.\n\n :return: (start, start_line, end, end_line): (int, int, int): the index\n of the start and end of whatever braces we are looking for, and the\n line number that the end is on (since it may be different than the line\n we started on)\n \"\"\"\n\n if brace in ['[', ']']:\n open_brace = '['\n close_brace = ']'\n elif brace in ['{', '}']:\n open_brace = '{'\n close_brace = '}'\n elif brace in ['(', ')']:\n open_brace = '('\n close_brace = ')'\n else:\n # unacceptable brace type!\n return (-1, -1, -1, -1)\n\n open_braces = []\n line = lines[line_index]\n\n ret_open_index = line.find(open_brace, start)\n line_index_cpy = line_index\n # sometimes people don't put the braces on the same line\n # as the tag\n while ret_open_index == -1:\n line_index = line_index + 1\n if line_index >= len(lines):\n # failed to find open braces...\n return (0, line_index_cpy, 0, line_index_cpy)\n line = lines[line_index]\n ret_open_index = line.find(open_brace)\n\n open_braces.append(open_brace)\n\n ret_open_line = line_index\n\n open_index = ret_open_index\n close_index = ret_open_index\n\n while len(open_braces) > 0:\n if open_index == -1 and close_index == -1:\n # we hit the end of the line! 
oh, noez!\n line_index = line_index + 1\n\n if line_index >= len(lines):\n # hanging braces!\n return (ret_open_index, ret_open_line,\n ret_open_index, ret_open_line)\n\n line = lines[line_index]\n # to not skip things that are at the beginning of the line\n close_index = line.find(close_brace)\n open_index = line.find(open_brace)\n\n else:\n if close_index != -1:\n close_index = line.find(close_brace, close_index + 1)\n if open_index != -1:\n open_index = line.find(open_brace, open_index + 1)\n\n if close_index != -1:\n open_braces.pop()\n if len(open_braces) == 0 and \\\n (open_index > close_index or open_index == -1):\n break\n if open_index != -1:\n open_braces.append(open_brace)\n\n ret_close_index = close_index\n\n return (ret_open_index, ret_open_line, ret_close_index, line_index)\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Plot extractor extractor."""
from __future__ import absolute_import, print_function
import codecs
import os
import re
from .config import (
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT,
CFG_PLOTEXTRACTOR_DISALLOWED_TEX,
)
from .output_utils import (
assemble_caption,
find_open_and_close_braces,
get_tex_location,
)
from .converter import rotate_image
ARXIV_HEADER = 'arXiv:'
PLOTS_DIR = 'plots'
MAIN_CAPTION_OR_IMAGE = 0
SUB_CAPTION_OR_IMAGE = 1
def get_context(lines, backwards=False):
"""Get context.
Given a relevant string from a TeX file, this function will extract text
from it as far as it is deemed contextually relevant, either backwards or
forwards in the text.
The level of relevance allowed is configurable. When it reaches some
point in the text that is determined to be out of scope from the current
context, like text that is identified as a new paragraph, a complex TeX
structure ('/begin', '/end', etc.) etc., it will return the previously
allocated text.
For use when extracting text with contextual value for an figure or plot.
:param lines (string): string to examine
:param reversed (bool): are we searching backwards?
:return context (string): extracted context
"""
tex_tag = re.compile(r".*\\(\w+).*")
sentence = re.compile(r"(?<=[.?!])[\s]+(?=[A-Z])")
context = []
word_list = lines.split()
if backwards:
word_list.reverse()
# For each word we do the following:
# 1. Check if we have reached word limit
# 2. If not, see if this is a TeX tag and see if its 'illegal'
# 3. Otherwise, add word to context
for word in word_list:
if len(context) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:
break
match = tex_tag.match(word)
if match and match.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX:
# TeX Construct matched, return
if backwards:
# When reversed we need to go back and
# remove unwanted data within brackets
temp_word = ""
while len(context):
temp_word = context.pop()
if '}' in temp_word:
break
break
context.append(word)
if backwards:
context.reverse()
text = " ".join(context)
sentence_list = sentence.split(text)
if backwards:
sentence_list.reverse()
if len(sentence_list) > CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT:
return " ".join(
sentence_list[:CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT])
else:
return " ".join(sentence_list)
def extract_context(tex_file, extracted_image_data):
"""Extract context.
Given a .tex file and a label name, this function will extract the text
before and after for all the references made to this label in the text.
The number of characters to extract before and after is configurable.
:param tex_file (list): path to .tex file
:param extracted_image_data ([(string, string, list), ...]):
a list of tuples of images matched to labels and captions from
this document.
:return extracted_image_data ([(string, string, list, list),
(string, string, list, list),...)]: the same list, but now containing
extracted contexts
"""
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
lines = "".join(get_lines_from_file(tex_file))
# Generate context for each image and its assoc. labels
for data in extracted_image_data:
context_list = []
# Generate a list of index tuples for all matches
indicies = [match.span()
for match in re.finditer(r"(\\(?:fig|ref)\{%s\})" %
(re.escape(data['label']),),
lines)]
for startindex, endindex in indicies:
# Retrive all lines before label until beginning of file
i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
if i < 0:
text_before = lines[:startindex]
else:
text_before = lines[i:startindex]
context_before = get_context(text_before, backwards=True)
# Retrive all lines from label until end of file and get context
i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
text_after = lines[endindex:i]
context_after = get_context(text_after)
context_list.append(
context_before + ' \\ref{' + data['label'] + '} ' +
context_after
)
data['contexts'] = context_list
def extract_captions(tex_file, sdir, image_list, primary=True):
    """Extract captions.

    Take the TeX file and the list of images in the tarball (which all,
    presumably, are used in the TeX file) and figure out which captions
    in the text are associated with which images.

    :param: tex_file (string): the name of the TeX file which mentions
        the images
    :param: sdir (string): path to current sub-directory
    :param: image_list (list): list of images in tarball
    :param: primary (bool): is this the primary call to extract_caption?
        (recursive calls on \\input-ed files pass False)
    :return: extracted_image_data ([(string, string, list),
        (string, string, list), ...]):
        a list of tuples representing the names of images and their
        corresponding figure labels from the TeX file
    """
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        # Not a readable file: nothing to extract.
        return []
    lines = get_lines_from_file(tex_file)
    # possible figure lead-ins
    figure_head = u'\\begin{figure'  # also matches figure*
    figure_wrap_head = u'\\begin{wrapfigure'
    figure_tail = u'\\end{figure'  # also matches figure*
    figure_wrap_tail = u'\\end{wrapfigure'
    picture_head = u'\\begin{picture}'
    displaymath_head = u'\\begin{displaymath}'
    subfloat_head = u'\\subfloat'
    subfig_head = u'\\subfigure'
    includegraphics_head = u'\\includegraphics'
    epsfig_head = u'\\epsfig'
    input_head = u'\\input'
    # possible caption lead-ins
    caption_head = u'\\caption'
    figcaption_head = u'\\figcaption'
    label_head = u'\\label'
    rotate = u'rotate='
    angle = u'angle='
    eps_tail = u'.eps'
    ps_tail = u'.ps'
    doc_head = u'\\begin{document}'
    doc_tail = u'\\end{document}'
    extracted_image_data = []
    cur_image = ''     # image name, or [main, [subimages]] once subfigs appear
    caption = ''       # caption text, or [main, [subcaptions]]
    labels = []        # every \label seen inside a figure so far
    active_label = ""  # label to attach to the next completed figure
    # Blank out everything before \begin{document} (top-level call only).
    if primary:
        for line_index in range(len(lines)):
            if lines[line_index].find(doc_head) < 0:
                lines[line_index] = ''
            else:
                break
    # are we using commas in filenames here? (scan the sibling tree once;
    # this widens the filename regex in intelligently_find_filenames)
    commas_okay = False
    for dummy1, dummy2, filenames in \
            os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
        for filename in filenames:
            if filename.find(',') > -1:
                commas_okay = True
                break
    # a comment is a % not preceded by a \
    comment = re.compile("(?<!\\\\)%")
    for line_index in range(len(lines)):
        # get rid of pesky comments by splitting where the comment is
        # and keeping only the part before the %
        line = comment.split(lines[line_index])[0]
        line = line.strip()
        lines[line_index] = line
    in_figure_tag = 0
    for line_index in range(len(lines)):
        line = lines[line_index]
        if line == '':
            continue
        if line.find(doc_tail) > -1:
            break
        """
        FIGURE -
        structure of a figure:
        \begin{figure}
        \formatting...
        \includegraphics[someoptions]{FILENAME}
        \caption{CAPTION} %caption and includegraphics may be switched!
        \end{figure}
        """
        index = max([line.find(figure_head), line.find(figure_wrap_head)])
        if index > -1:
            in_figure_tag = 1
            # Some documents keep images/captions outside the figure tag;
            # flush anything gathered so far before entering a new figure.
            cur_image, caption, extracted_image_data = put_it_together(
                cur_image, caption,
                active_label, extracted_image_data,
                line_index, lines)
        # Inclusion styles are wildly inconsistent across documents, so any
        # mention of .eps/.ps/\epsfig is treated as a possible filename.
        index = max([line.find(eps_tail), line.find(ps_tail),
                     line.find(epsfig_head)])
        if index > -1:
            # ext=True restricts the filename regex to .eps/.ps-style names.
            if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
                ext = True
            else:
                ext = False
            filenames = intelligently_find_filenames(line, ext=ext,
                                                     commas_okay=commas_okay)
            # try to look ahead! sometimes there are better matches after
            if line_index < len(lines) - 1:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 1],
                    commas_okay=commas_okay))
            if line_index < len(lines) - 2:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 2],
                    commas_okay=commas_okay))
            for filename in filenames:
                # NOTE(review): on Python 3 .encode() yields bytes, not str —
                # confirm downstream consumers accept bytes filenames.
                filename = filename.encode('utf-8', 'ignore')
                if cur_image == '':
                    cur_image = filename
                elif type(cur_image) == list:
                    if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                        cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
                    else:
                        cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
                else:
                    # Second image without a sub-structure: promote to
                    # [main_image, [subimages]] form.
                    cur_image = ['', [cur_image, filename]]
        """
        Rotate and angle
        """
        index = max(line.find(rotate), line.find(angle))
        if index > -1:
            # which is the image associated to it?
            filenames = intelligently_find_filenames(line,
                                                     commas_okay=commas_okay)
            # try the line after and the line before
            if line_index + 1 < len(lines):
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 1],
                    commas_okay=commas_okay))
            if line_index > 1:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index - 1],
                    commas_okay=commas_okay))
            already_tried = []
            for filename in filenames:
                if filename != 'ERROR' and filename not in already_tried:
                    # Stop at the first candidate that actually rotates.
                    if rotate_image(filename, line, sdir, image_list):
                        break
                    already_tried.append(filename)
        """
        INCLUDEGRAPHICS -
        structure of includegraphics:
        \includegraphics[someoptions]{FILENAME}
        """
        index = line.find(includegraphics_head)
        if index > -1:
            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(line_index, index, '{', lines)
            # The filename is the {...} argument (may span lines).
            filename = lines[open_curly_line][open_curly + 1:close_curly]
            if cur_image == '':
                cur_image = filename
            elif type(cur_image) == list:
                if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                    cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
                else:
                    cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
            else:
                cur_image = ['', [cur_image, filename]]
        """
        {\input{FILENAME}}
        \caption{CAPTION}
        This input is ambiguous, since input is also used for things like
        inclusion of data from other LaTeX files directly.
        """
        index = line.find(input_head)
        if index > -1:
            new_tex_names = intelligently_find_filenames(
                line, TeX=True,
                commas_okay=commas_okay)
            for new_tex_name in new_tex_names:
                if new_tex_name != 'ERROR':
                    new_tex_file = get_tex_location(new_tex_name, tex_file)
                    if new_tex_file and primary:  # to kill recursion
                        extracted_image_data.extend(extract_captions(
                            new_tex_file, sdir,
                            image_list,
                            primary=False
                        ))
        """PICTURE"""
        index = line.find(picture_head)
        if index > -1:
            # structure of a picture:
            # \begin{picture}
            # ....not worrying about this now
            # print('found picture tag')
            # FIXME
            pass
        """DISPLAYMATH"""
        index = line.find(displaymath_head)
        if index > -1:
            # structure of a displaymath:
            # \begin{displaymath}
            # ....not worrying about this now
            # print('found displaymath tag')
            # FIXME
            pass
        """
        CAPTIONS -
        structure of a caption:
        \caption[someoptions]{CAPTION}
        or
        \caption{CAPTION}
        or
        \caption{{options}{CAPTION}}
        """
        index = max([line.find(caption_head), line.find(figcaption_head)])
        if index > -1:
            open_curly, open_curly_line, close_curly, close_curly_line = \
                find_open_and_close_braces(line_index, index, '{', lines)
            cap_begin = open_curly + 1
            cur_caption = assemble_caption(
                open_curly_line, cap_begin,
                close_curly_line, close_curly, lines)
            if caption == '':
                caption = cur_caption
            elif type(caption) == list:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
                else:
                    caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
            elif caption != cur_caption:
                # Two distinct captions: promote to [main, [subcaptions]].
                caption = ['', [caption, cur_caption]]
        """
        SUBFLOATS -
        structure of a subfloat (inside of a figure tag):
        \subfloat[CAPTION]{options{FILENAME}}
        also associated with the overall caption of the enclosing figure
        """
        index = line.find(subfloat_head)
        if index > -1:
            # if we are dealing with subfloats, we need a different
            # sort of structure to keep track of captions and subcaptions
            if not isinstance(cur_image, list):
                cur_image = [cur_image, []]
            if not isinstance(caption, list):
                caption = [caption, []]
            open_square, open_square_line, close_square, close_square_line = \
                find_open_and_close_braces(line_index, index, '[', lines)
            cap_begin = open_square + 1
            sub_caption = assemble_caption(
                open_square_line,
                cap_begin, close_square_line, close_square, lines)
            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(close_square_line,
                                           close_square, '{', lines)
            sub_image = lines[open_curly_line][open_curly + 1:close_curly]
            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
        """
        SUBFIGURES -
        structure of a subfigure (inside a figure tag):
        \subfigure[CAPTION]{
        \includegraphics[options]{FILENAME}}
        also associated with the overall caption of the enclosing figure
        """
        index = line.find(subfig_head)
        if index > -1:
            # like with subfloats, we need a different structure for keeping
            # track of this stuff
            if type(cur_image) != list:
                cur_image = [cur_image, []]
            if type(caption) != list:
                caption = [caption, []]
            open_square, open_square_line, close_square, close_square_line = \
                find_open_and_close_braces(line_index, index, '[', lines)
            cap_begin = open_square + 1
            sub_caption = assemble_caption(open_square_line,
                                           cap_begin, close_square_line,
                                           close_square, lines)
            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
            index_cpy = index
            # find the graphics tag to get the filename
            # it is okay if we eat lines here
            index = line.find(includegraphics_head)
            while index == -1 and (line_index + 1) < len(lines):
                line_index += 1
                line = lines[line_index]
                index = line.find(includegraphics_head)
            if line_index == len(lines):
                # didn't find the image name on line
                # NOTE(review): the while loop leaves line_index at most
                # len(lines) - 1, so this branch looks unreachable; also
                # index_cpy is a column offset, not a line index — confirm.
                line_index = index_cpy
            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(line_index,
                                           index, '{', lines)
            sub_image = lines[open_curly_line][open_curly + 1:close_curly]
            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
        """
        LABELS -
        structure of a label:
        \label{somelabelnamewhichprobablyincludesacolon}
        Labels are used to tag images and will later be used in ref tags
        to reference them. This is interesting because in effect the refs
        to a plot are additional caption for it.
        Notes: labels can be used for many more things than just plots.
        We'll have to experiment with how to best associate a label with an
        image.. if it's in the caption, it's easy. If it's in a figure, it's
        still okay... but the images that aren't in figure tags are numerous.
        """
        index = line.find(label_head)
        # Only labels inside a figure environment become the active label.
        if index > -1 and in_figure_tag:
            open_curly, open_curly_line, close_curly, dummy =\
                find_open_and_close_braces(line_index,
                                           index, '{', lines)
            label = lines[open_curly_line][open_curly + 1:close_curly]
            if label not in labels:
                active_label = label
            labels.append(label)
        """
        FIGURE
        important: we put the check for the end of the figure at the end
        of the loop in case some pathological person puts everything in one
        line
        """
        index = max([
            line.find(figure_tail),
            line.find(figure_wrap_tail),
            line.find(doc_tail)
        ])
        if index > -1:
            in_figure_tag = 0
            # Figure closed: emit the gathered image/caption/label tuple.
            cur_image, caption, extracted_image_data = \
                put_it_together(cur_image, caption, active_label,
                                extracted_image_data,
                                line_index, lines)
        """
        END DOCUMENT
        we shouldn't look at anything after the end document tag is found
        """
        index = line.find(doc_tail)
        if index > -1:
            break
    return extracted_image_data
def intelligently_find_filenames(line, TeX=False, ext=False,
                                 commas_okay=False):
    """Intelligently find filenames.

    Find the filename(s) in the line. We don't support all filenames!
    Just eps and ps for now.

    :param: line (string): the line we want to get a filename out of
    :param: TeX (bool): also allow .tex/.latex-style suffixes
    :param: ext (bool): require an .eps/.ps-style extension
    :param: commas_okay (bool): allow commas inside filenames
    :return: filenames ([string, ...]): what is probably the name of the
        file(s); ['ERROR'] when nothing matched
    """
    files_included = ['ERROR']
    if commas_okay:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.,%#]+'
    else:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.%#]+'
    if ext:
        valid_for_filename += '\\.e*ps[texfi2]*'
    if TeX:
        valid_for_filename += '[\\.latex]*'
    file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)
    # right now it looks like '=FILENAME,' or '=FILENAME '
    for file_included in file_inclusion:
        files_included.append(file_included[1:-1])
    file_inclusion = re.findall('(?:[ps]*file=|figure=)' +
                                valid_for_filename + '[,\\]} ]*', line)
    for file_included in file_inclusion:
        # still has the '=': strip everything up to and including it
        part_before_equals = file_included.split('=')[0]
        # Bug fix: the original compared len(...) (an int) against the
        # string itself, which was always true; compare lengths instead.
        if len(part_before_equals) != len(file_included):
            file_included = file_included[
                len(part_before_equals) + 1:].strip()
        if file_included not in files_included:
            files_included.append(file_included)
    file_inclusion = re.findall(
        '["\'{\\[]' + valid_for_filename + '[}\\],"\']',
        line)
    for file_included in file_inclusion:
        # strip the surrounding {} / [] / "" / ''
        file_included = file_included[1:-1].strip()
        if file_included not in files_included:
            files_included.append(file_included)
    # Bare filenames: the whole line, a line-leading token, or a trailing one.
    for pattern in ('^' + valid_for_filename + '$',
                    '^' + valid_for_filename + '[,\\} $]',
                    '\\s*' + valid_for_filename + '\\s*$'):
        for file_included in re.findall(pattern, line):
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    if files_included != ['ERROR']:
        files_included = files_included[1:]  # cut off the dummy
        # Rebuild the list instead of mutating it while iterating (the
        # original remove()/append() during iteration skipped entries).
        cleaned = []
        for file_included in files_included:
            if file_included == '':
                continue  # drop empty matches
            cleaned.append(file_included)
            # Entries like 'a.eps b.eps' or 'a.eps,b.eps' hold several names.
            # Bug fix: the original split comma-entries on ' ' instead of ','.
            for separator in (' ', ','):
                if separator in file_included:
                    for subfile in file_included.split(separator):
                        if subfile and subfile not in cleaned:
                            cleaned.append(subfile)
        files_included = cleaned
    return files_included
def get_lines_from_file(filepath, encoding="UTF-8"):
    """Return the list of lines of ``filepath``.

    The file is decoded with ``encoding`` first; if that fails with a
    ``UnicodeDecodeError`` it is re-read as ISO-8859-1, which accepts any
    byte sequence.

    :param filepath (string): path of the file to read
    :param encoding (string): encoding tried first (default UTF-8)
    :return: lines ([string, ...]) with line endings preserved
    :raises IOError/OSError: if the file cannot be opened
    """
    # The original opened handles manually with a shared finally: if open()
    # itself failed, fd was unbound (NameError), and on a decode error the
    # first handle leaked. 'with' closes each handle in every case.
    try:
        with codecs.open(filepath, 'r', encoding) as fd:
            return fd.readlines()
    except UnicodeDecodeError:
        # Fall back to 'ISO-8859-1'
        with codecs.open(filepath, 'r', 'ISO-8859-1') as fd:
            return fd.readlines()
|
inspirehep/plotextractor | plotextractor/extractor.py | intelligently_find_filenames | python | def intelligently_find_filenames(line, TeX=False, ext=False,
commas_okay=False):
files_included = ['ERROR']
if commas_okay:
valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.,%#]+'
else:
valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.%#]+'
if ext:
valid_for_filename += '\.e*ps[texfi2]*'
if TeX:
valid_for_filename += '[\.latex]*'
file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)
if len(file_inclusion) > 0:
# right now it looks like '=FILENAME,' or '=FILENAME '
for file_included in file_inclusion:
files_included.append(file_included[1:-1])
file_inclusion = re.findall('(?:[ps]*file=|figure=)' +
valid_for_filename + '[,\\]} ]*', line)
if len(file_inclusion) > 0:
# still has the =
for file_included in file_inclusion:
part_before_equals = file_included.split('=')[0]
if len(part_before_equals) != file_included:
file_included = file_included[
len(part_before_equals) + 1:].strip()
if file_included not in files_included:
files_included.append(file_included)
file_inclusion = re.findall(
'["\'{\\[]' + valid_for_filename + '[}\\],"\']',
line)
if len(file_inclusion) > 0:
# right now it's got the {} or [] or "" or '' around it still
for file_included in file_inclusion:
file_included = file_included[1:-1]
file_included = file_included.strip()
if file_included not in files_included:
files_included.append(file_included)
file_inclusion = re.findall('^' + valid_for_filename + '$', line)
if len(file_inclusion) > 0:
for file_included in file_inclusion:
file_included = file_included.strip()
if file_included not in files_included:
files_included.append(file_included)
file_inclusion = re.findall('^' + valid_for_filename + '[,\\} $]', line)
if len(file_inclusion) > 0:
for file_included in file_inclusion:
file_included = file_included.strip()
if file_included not in files_included:
files_included.append(file_included)
file_inclusion = re.findall('\\s*' + valid_for_filename + '\\s*$', line)
if len(file_inclusion) > 0:
for file_included in file_inclusion:
file_included = file_included.strip()
if file_included not in files_included:
files_included.append(file_included)
if files_included != ['ERROR']:
files_included = files_included[1:] # cut off the dummy
for file_included in files_included:
if file_included == '':
files_included.remove(file_included)
if ' ' in file_included:
for subfile in file_included.split(' '):
if subfile not in files_included:
files_included.append(subfile)
if ',' in file_included:
for subfile in file_included.split(' '):
if subfile not in files_included:
files_included.append(subfile)
return files_included | Intelligently find filenames.
Find the filename in the line. We don't support all filenames! Just eps
and ps for now.
:param: line (string): the line we want to get a filename out of
:return: filename ([string, ...]): what is probably the name of the file(s) | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/extractor.py#L774-L869 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Plot extractor extractor."""
from __future__ import absolute_import, print_function
import codecs
import os
import re
from .config import (
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT,
CFG_PLOTEXTRACTOR_DISALLOWED_TEX,
)
from .output_utils import (
assemble_caption,
find_open_and_close_braces,
get_tex_location,
)
from .converter import rotate_image
ARXIV_HEADER = 'arXiv:'
PLOTS_DIR = 'plots'
MAIN_CAPTION_OR_IMAGE = 0
SUB_CAPTION_OR_IMAGE = 1
def get_context(lines, backwards=False):
    """Get context.

    Given a relevant string from a TeX file, this function will extract text
    from it as far as it is deemed contextually relevant, either backwards or
    forwards in the text.
    The level of relevance allowed is configurable. When it reaches some
    point in the text that is determined to be out of scope from the current
    context, like text that is identified as a new paragraph, a complex TeX
    structure ('/begin', '/end', etc.) etc., it will return the previously
    allocated text.
    For use when extracting text with contextual value for a figure or plot.

    :param lines (string): string to examine
    :param backwards (bool): are we searching backwards?
    :return context (string): extracted context
    """
    # Matches any TeX command inside a word; group(1) is the command name,
    # e.g. 'begin' for '\begin{x}'.
    tex_tag = re.compile(r".*\\(\w+).*")
    # Sentence boundary: . ? or ! followed by whitespace and a capital.
    sentence = re.compile(r"(?<=[.?!])[\s]+(?=[A-Z])")
    context = []
    word_list = lines.split()
    if backwards:
        # Walk away from the reference point, i.e. from the end of the text.
        word_list.reverse()
    # For each word we do the following:
    #   1. Check if we have reached word limit
    #   2. If not, see if this is a TeX tag and see if its 'illegal'
    #   3. Otherwise, add word to context
    for word in word_list:
        if len(context) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:
            break
        match = tex_tag.match(word)
        if match and match.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX:
            # Disallowed TeX construct: stop collecting words here.
            if backwards:
                # When reversed we need to go back and remove the words of
                # the construct's {...} argument that were already taken:
                # pop until the closing brace is discarded.
                temp_word = ""
                while len(context):
                    temp_word = context.pop()
                    if '}' in temp_word:
                        break
            break
        context.append(word)
    if backwards:
        # Restore natural reading order before joining.
        context.reverse()
    text = " ".join(context)
    sentence_list = sentence.split(text)
    if backwards:
        sentence_list.reverse()
    # Keep at most the configured number of sentences nearest the reference.
    if len(sentence_list) > CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT:
        return " ".join(
            sentence_list[:CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT])
    else:
        return " ".join(sentence_list)
def extract_context(tex_file, extracted_image_data):
    """Extract context.

    Given a .tex file and a label name, this function will extract the text
    before and after for all the references made to this label in the text.
    The number of characters to extract before and after is configurable.

    Note: the input dicts are modified in place — each receives a
    'contexts' key. The function returns [] when the path is not a
    readable file and None otherwise (it does NOT return the list).

    :param tex_file (string): path to .tex file
    :param extracted_image_data ([dict, ...]): a list of dicts of images
        matched to labels and captions from this document; each is updated
        in place with its extracted contexts.
    """
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        return []
    lines = "".join(get_lines_from_file(tex_file))
    # Generate context for each image and its assoc. labels
    for data in extracted_image_data:
        context_list = []
        # Generate a list of (start, end) index tuples for every
        # \fig{label} / \ref{label} occurrence in the document.
        indicies = [match.span()
                    for match in re.finditer(r"(\\(?:fig|ref)\{%s\})" %
                                             (re.escape(data['label']),),
                                             lines)]
        for startindex, endindex in indicies:
            # Window of text before the label, clamped at start of file,
            # reduced to relevant context.
            i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            if i < 0:
                text_before = lines[:startindex]
            else:
                text_before = lines[i:startindex]
            context_before = get_context(text_before, backwards=True)
            # Window of text after the label; slicing clamps at end of file.
            i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            text_after = lines[endindex:i]
            context_after = get_context(text_after)
            context_list.append(
                context_before + ' \\ref{' + data['label'] + '} ' +
                context_after
            )
        data['contexts'] = context_list
def extract_captions(tex_file, sdir, image_list, primary=True):
"""Extract captions.
Take the TeX file and the list of images in the tarball (which all,
presumably, are used in the TeX file) and figure out which captions
in the text are associated with which images
:param: lines (list): list of lines of the TeX file
:param: tex_file (string): the name of the TeX file which mentions
the images
:param: sdir (string): path to current sub-directory
:param: image_list (list): list of images in tarball
:param: primary (bool): is this the primary call to extract_caption?
:return: images_and_captions_and_labels ([(string, string, list),
(string, string, list), ...]):
a list of tuples representing the names of images and their
corresponding figure labels from the TeX file
"""
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
lines = get_lines_from_file(tex_file)
# possible figure lead-ins
figure_head = u'\\begin{figure' # also matches figure*
figure_wrap_head = u'\\begin{wrapfigure'
figure_tail = u'\\end{figure' # also matches figure*
figure_wrap_tail = u'\\end{wrapfigure'
picture_head = u'\\begin{picture}'
displaymath_head = u'\\begin{displaymath}'
subfloat_head = u'\\subfloat'
subfig_head = u'\\subfigure'
includegraphics_head = u'\\includegraphics'
epsfig_head = u'\\epsfig'
input_head = u'\\input'
# possible caption lead-ins
caption_head = u'\\caption'
figcaption_head = u'\\figcaption'
label_head = u'\\label'
rotate = u'rotate='
angle = u'angle='
eps_tail = u'.eps'
ps_tail = u'.ps'
doc_head = u'\\begin{document}'
doc_tail = u'\\end{document}'
extracted_image_data = []
cur_image = ''
caption = ''
labels = []
active_label = ""
# cut out shit before the doc head
if primary:
for line_index in range(len(lines)):
if lines[line_index].find(doc_head) < 0:
lines[line_index] = ''
else:
break
# are we using commas in filenames here?
commas_okay = False
for dummy1, dummy2, filenames in \
os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
for filename in filenames:
if filename.find(',') > -1:
commas_okay = True
break
# a comment is a % not preceded by a \
comment = re.compile("(?<!\\\\)%")
for line_index in range(len(lines)):
# get rid of pesky comments by splitting where the comment is
# and keeping only the part before the %
line = comment.split(lines[line_index])[0]
line = line.strip()
lines[line_index] = line
in_figure_tag = 0
for line_index in range(len(lines)):
line = lines[line_index]
if line == '':
continue
if line.find(doc_tail) > -1:
break
"""
FIGURE -
structure of a figure:
\begin{figure}
\formatting...
\includegraphics[someoptions]{FILENAME}
\caption{CAPTION} %caption and includegraphics may be switched!
\end{figure}
"""
index = max([line.find(figure_head), line.find(figure_wrap_head)])
if index > -1:
in_figure_tag = 1
# some punks don't like to put things in the figure tag. so we
# just want to see if there is anything that is sitting outside
# of it when we find it
cur_image, caption, extracted_image_data = put_it_together(
cur_image, caption,
active_label, extracted_image_data,
line_index, lines)
# here, you jerks, just make it so that it's fecking impossible to
# figure out your damn inclusion types
index = max([line.find(eps_tail), line.find(ps_tail),
line.find(epsfig_head)])
if index > -1:
if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
ext = True
else:
ext = False
filenames = intelligently_find_filenames(line, ext=ext,
commas_okay=commas_okay)
# try to look ahead! sometimes there are better matches after
if line_index < len(lines) - 1:
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index < len(lines) - 2:
filenames.extend(intelligently_find_filenames(
lines[line_index + 2],
commas_okay=commas_okay))
for filename in filenames:
filename = filename.encode('utf-8', 'ignore')
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
Rotate and angle
"""
index = max(line.find(rotate), line.find(angle))
if index > -1:
# which is the image associated to it?
filenames = intelligently_find_filenames(line,
commas_okay=commas_okay)
# try the line after and the line before
if line_index + 1 < len(lines):
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index > 1:
filenames.extend(intelligently_find_filenames(
lines[line_index - 1],
commas_okay=commas_okay))
already_tried = []
for filename in filenames:
if filename != 'ERROR' and filename not in already_tried:
if rotate_image(filename, line, sdir, image_list):
break
already_tried.append(filename)
"""
INCLUDEGRAPHICS -
structure of includegraphics:
\includegraphics[someoptions]{FILENAME}
"""
index = line.find(includegraphics_head)
if index > -1:
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index, index, '{', lines)
filename = lines[open_curly_line][open_curly + 1:close_curly]
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
{\input{FILENAME}}
\caption{CAPTION}
This input is ambiguous, since input is also used for things like
inclusion of data from other LaTeX files directly.
"""
index = line.find(input_head)
if index > -1:
new_tex_names = intelligently_find_filenames(
line, TeX=True,
commas_okay=commas_okay)
for new_tex_name in new_tex_names:
if new_tex_name != 'ERROR':
new_tex_file = get_tex_location(new_tex_name, tex_file)
if new_tex_file and primary: # to kill recursion
extracted_image_data.extend(extract_captions(
new_tex_file, sdir,
image_list,
primary=False
))
"""PICTURE"""
index = line.find(picture_head)
if index > -1:
# structure of a picture:
# \begin{picture}
# ....not worrying about this now
# print('found picture tag')
# FIXME
pass
"""DISPLAYMATH"""
index = line.find(displaymath_head)
if index > -1:
# structure of a displaymath:
# \begin{displaymath}
# ....not worrying about this now
# print('found displaymath tag')
# FIXME
pass
"""
CAPTIONS -
structure of a caption:
\caption[someoptions]{CAPTION}
or
\caption{CAPTION}
or
\caption{{options}{CAPTION}}
"""
index = max([line.find(caption_head), line.find(figcaption_head)])
if index > -1:
open_curly, open_curly_line, close_curly, close_curly_line = \
find_open_and_close_braces(line_index, index, '{', lines)
cap_begin = open_curly + 1
cur_caption = assemble_caption(
open_curly_line, cap_begin,
close_curly_line, close_curly, lines)
if caption == '':
caption = cur_caption
elif type(caption) == list:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
else:
caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
elif caption != cur_caption:
caption = ['', [caption, cur_caption]]
"""
SUBFLOATS -
structure of a subfloat (inside of a figure tag):
\subfloat[CAPTION]{options{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfloat_head)
if index > -1:
# if we are dealing with subfloats, we need a different
# sort of structure to keep track of captions and subcaptions
if not isinstance(cur_image, list):
cur_image = [cur_image, []]
if not isinstance(caption, list):
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(
open_square_line,
cap_begin, close_square_line, close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(close_square_line,
close_square, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
SUBFIGURES -
structure of a subfigure (inside a figure tag):
\subfigure[CAPTION]{
\includegraphics[options]{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfig_head)
if index > -1:
# like with subfloats, we need a different structure for keepin
# track of this stuff
if type(cur_image) != list:
cur_image = [cur_image, []]
if type(caption) != list:
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(open_square_line,
cap_begin, close_square_line,
close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
index_cpy = index
# find the graphics tag to get the filename
# it is okay if we eat lines here
index = line.find(includegraphics_head)
while index == -1 and (line_index + 1) < len(lines):
line_index += 1
line = lines[line_index]
index = line.find(includegraphics_head)
if line_index == len(lines):
# didn't find the image name on line
line_index = index_cpy
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index,
index, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
LABELS -
structure of a label:
\label{somelabelnamewhichprobablyincludesacolon}
Labels are used to tag images and will later be used in ref tags
to reference them. This is interesting because in effect the refs
to a plot are additional caption for it.
Notes: labels can be used for many more things than just plots.
We'll have to experiment with how to best associate a label with an
image.. if it's in the caption, it's easy. If it's in a figure, it's
still okay... but the images that aren't in figure tags are numerous.
"""
index = line.find(label_head)
if index > -1 and in_figure_tag:
open_curly, open_curly_line, close_curly, dummy =\
find_open_and_close_braces(line_index,
index, '{', lines)
label = lines[open_curly_line][open_curly + 1:close_curly]
if label not in labels:
active_label = label
labels.append(label)
"""
FIGURE
important: we put the check for the end of the figure at the end
of the loop in case some pathological person puts everything in one
line
"""
index = max([
line.find(figure_tail),
line.find(figure_wrap_tail),
line.find(doc_tail)
])
if index > -1:
in_figure_tag = 0
cur_image, caption, extracted_image_data = \
put_it_together(cur_image, caption, active_label,
extracted_image_data,
line_index, lines)
"""
END DOCUMENT
we shouldn't look at anything after the end document tag is found
"""
index = line.find(doc_tail)
if index > -1:
break
return extracted_image_data
def put_it_together(cur_image, caption, context, extracted_image_data,
                    line_index, lines):
    """Put it together.

    Takes the current image(s) and caption(s) and assembles them into
    something useful in the extracted_image_data list.

    :param: cur_image (string || list): the image currently being dealt with,
        or the list of images, in the case of subimages
    :param: caption (string || list): the caption or captions currently in
        scope
    :param: context (string): the active label associated with the figure;
        stored as the third element of every emitted tuple
    :param: extracted_image_data ([(string, string), (string, string), ...]):
        a list of tuples of images matched to captions from this document.
    :param: line_index (int): the index where we are in the lines (for
        searchback and searchforward purposes)
    :param: lines ([string, string, ...]): the lines in the TeX

    :return: (cur_image, caption, extracted_image_data): the same arguments it
        was sent, processed appropriately
    """
    if type(cur_image) == list:
        # drop placeholder 'ERROR' entries before pairing anything up
        if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':
            cur_image[MAIN_CAPTION_OR_IMAGE] = ''
        for image in cur_image[SUB_CAPTION_OR_IMAGE]:
            if image == 'ERROR':
                cur_image[SUB_CAPTION_OR_IMAGE].remove(image)
    if cur_image != '' and caption != '':
        if type(cur_image) == list and type(caption) == list:
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\
                    caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE],
                     caption[MAIN_CAPTION_OR_IMAGE],
                     context))
            if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list:
                # why is the main image a list?
                # it's a good idea to attach the main caption to other
                # things, but the main image can only be used once
                cur_image[MAIN_CAPTION_OR_IMAGE] = ''
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    for index in \
                            range(len(cur_image[SUB_CAPTION_OR_IMAGE])):
                        if index < len(caption[SUB_CAPTION_OR_IMAGE]):
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                caption[SUB_CAPTION_OR_IMAGE][index]
                        else:
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                'Caption not extracted'
                        extracted_image_data.append(
                            (cur_image[SUB_CAPTION_OR_IMAGE][index],
                             long_caption, context))
                else:
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \
                        ' : ' + caption[SUB_CAPTION_OR_IMAGE]
                    for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_image, long_caption, context))
            else:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                    for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:
                        long_caption = long_caption + ' : ' + sub_cap
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE], long_caption,
                         context))
                else:
                    # wtf are they lists for?
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE],
                         caption[SUB_CAPTION_OR_IMAGE], context))
        elif type(cur_image) == list:
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                for image in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append((image, caption, context))
            else:
                extracted_image_data.append(
                    (cur_image[SUB_CAPTION_OR_IMAGE], caption, context))
        elif type(caption) == list:
            if caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))
            if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                # multiple caps for one image:
                long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                    if long_caption != '':
                        long_caption += ' : '
                    long_caption += subcap
                extracted_image_data.append((cur_image, long_caption, context))
            else:
                # BUG FIX: this tuple was written with a '.' instead of a
                # comma (`caption[SUB_CAPTION_OR_IMAGE]. context`), which
                # raised AttributeError whenever this branch was reached.
                extracted_image_data.append(
                    (cur_image, caption[SUB_CAPTION_OR_IMAGE], context))
        else:
            extracted_image_data.append((cur_image, caption, context))
    elif cur_image != '' and caption == '':
        # we may have missed the caption somewhere.
        REASONABLE_SEARCHBACK = 25
        REASONABLE_SEARCHFORWARD = 5
        curly_no_tag_preceding = '(?<!\\w){'
        for searchback in range(REASONABLE_SEARCHBACK):
            if line_index - searchback < 0:
                continue
            back_line = lines[line_index - searchback]
            m = re.search(curly_no_tag_preceding, back_line)
            if m:
                open_curly = m.start()
                open_curly, open_curly_line, close_curly, \
                    close_curly_line = find_open_and_close_braces(
                        line_index - searchback, open_curly, '{', lines)
                cap_begin = open_curly + 1
                caption = assemble_caption(open_curly_line, cap_begin,
                                           close_curly_line, close_curly,
                                           lines)
                if type(cur_image) == list:
                    extracted_image_data.append(
                        (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
                    for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_img, caption, context))
                else:
                    extracted_image_data.append((cur_image, caption, context))
                break
        if caption == '':
            for searchforward in range(REASONABLE_SEARCHFORWARD):
                if line_index + searchforward >= len(lines):
                    break
                fwd_line = lines[line_index + searchforward]
                m = re.search(curly_no_tag_preceding, fwd_line)
                if m:
                    open_curly = m.start()
                    open_curly, open_curly_line, close_curly,\
                        close_curly_line = find_open_and_close_braces(
                            line_index + searchforward, open_curly, '{', lines)
                    cap_begin = open_curly + 1
                    caption = assemble_caption(open_curly_line,
                                               cap_begin, close_curly_line,
                                               close_curly, lines)
                    if type(cur_image) == list:
                        extracted_image_data.append(
                            (cur_image[MAIN_CAPTION_OR_IMAGE],
                             caption, context))
                        for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                            extracted_image_data.append(
                                (sub_img, caption, context))
                    else:
                        extracted_image_data.append(
                            (cur_image, caption, context))
                    break
        if caption == '':
            if type(cur_image) == list:
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found',
                     context))
                for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append(
                        (sub_img, 'No caption', context))
            else:
                extracted_image_data.append(
                    (cur_image, 'No caption found', context))
    elif caption != '' and cur_image == '':
        if type(caption) == list:
            long_caption = caption[MAIN_CAPTION_OR_IMAGE]
            for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                long_caption = long_caption + ': ' + subcap
        else:
            long_caption = caption
        # 'noimg' marks a caption that never found its image
        extracted_image_data.append(('', 'noimg' + long_caption, context))
    # if we're leaving the figure, no sense keeping the data
    cur_image = ''
    caption = ''
    return cur_image, caption, extracted_image_data
def get_lines_from_file(filepath, encoding="UTF-8"):
    """Return the list of lines read from ``filepath``.

    The file is first decoded with *encoding* (UTF-8 by default); if that
    fails with a ``UnicodeDecodeError`` it is re-read as ISO-8859-1, which
    accepts any byte sequence.

    :param filepath (string): path of the file to read
    :param encoding (string): codec tried first
    :return ([string, ...]): the file's lines, newline characters included

    BUG FIXES vs. the original: if ``codecs.open`` itself raised, ``fd``
    was unbound and the ``finally: fd.close()`` masked the real error with
    a ``NameError``; and the first handle was leaked when the decode
    fallback kicked in.  ``with`` closes each handle deterministically.
    """
    try:
        with codecs.open(filepath, 'r', encoding) as fd:
            return fd.readlines()
    except UnicodeDecodeError:
        # Fall back to 'ISO-8859-1'
        with codecs.open(filepath, 'r', 'ISO-8859-1') as fd:
            return fd.readlines()
|
inspirehep/plotextractor | plotextractor/extractor.py | get_lines_from_file | python | def get_lines_from_file(filepath, encoding="UTF-8"):
try:
fd = codecs.open(filepath, 'r', encoding)
lines = fd.readlines()
except UnicodeDecodeError:
# Fall back to 'ISO-8859-1'
fd = codecs.open(filepath, 'r', 'ISO-8859-1')
lines = fd.readlines()
finally:
fd.close()
return lines | Return an iterator over lines. | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/extractor.py#L872-L883 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Plot extractor extractor."""
from __future__ import absolute_import, print_function
import codecs
import os
import re
from .config import (
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT,
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT,
CFG_PLOTEXTRACTOR_DISALLOWED_TEX,
)
from .output_utils import (
assemble_caption,
find_open_and_close_braces,
get_tex_location,
)
from .converter import rotate_image
ARXIV_HEADER = 'arXiv:'
PLOTS_DIR = 'plots'
MAIN_CAPTION_OR_IMAGE = 0
SUB_CAPTION_OR_IMAGE = 1
def get_context(lines, backwards=False):
    """Collect contextually relevant text near a figure reference.

    Walks the words of *lines* — in natural order, or from the end when
    *backwards* is true — accumulating them until either the configured
    word limit is reached or a disallowed TeX construct is met.  The
    accumulated text is then trimmed to the configured sentence limit.

    :param lines (string): the raw text to examine
    :param backwards (bool): True when scanning away from the reference
        towards the start of the text
    :return (string): the extracted context
    """
    tag_pattern = re.compile(r".*\\(\w+).*")
    boundary_pattern = re.compile(r"(?<=[.?!])[\s]+(?=[A-Z])")
    words = lines.split()
    if backwards:
        words.reverse()
    collected = []
    # Accumulate words until the limit, stopping early at any TeX tag
    # from the disallowed set.
    for token in words:
        if len(collected) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:
            break
        hit = tag_pattern.match(token)
        if hit and hit.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX:
            if backwards:
                # When scanning backwards, unwind the words collected
                # since (and including) the last one containing '}', so
                # no dangling bracketed fragment survives.
                while collected:
                    if '}' in collected.pop():
                        break
            break
        collected.append(token)
    if backwards:
        collected.reverse()
    sentences = boundary_pattern.split(" ".join(collected))
    if backwards:
        sentences.reverse()
    # Keep at most the configured number of sentences.
    del sentences[CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT:]
    return " ".join(sentences)
def extract_context(tex_file, extracted_image_data):
    """Extract context.

    Given a .tex file and a label name, this function will extract the text
    before and after for all the references made to this label in the text.
    The number of characters to extract before and after is configurable.

    Each dict in *extracted_image_data* is updated in place: a
    ``'contexts'`` key is set to the list of context strings found for
    its ``'label'``.

    :param tex_file (string): path to .tex file
    :param extracted_image_data ([dict, ...]):
        a list of dicts of images matched to labels and captions from
        this document.

    :return extracted_image_data: the same list, but now containing
        extracted contexts (``[]`` when *tex_file* is missing or a
        directory)
    """
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        return []
    lines = "".join(get_lines_from_file(tex_file))
    # Generate context for each image and its assoc. labels
    for data in extracted_image_data:
        context_list = []
        # Generate a list of index tuples for all matches
        indicies = [match.span()
                    for match in re.finditer(r"(\\(?:fig|ref)\{%s\})" %
                                             (re.escape(data['label']),),
                                             lines)]
        for startindex, endindex in indicies:
            # Retrieve the text preceding the label, clamped at the
            # beginning of the file
            i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            if i < 0:
                text_before = lines[:startindex]
            else:
                text_before = lines[i:startindex]
            context_before = get_context(text_before, backwards=True)
            # Retrieve the text following the label and get context
            i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            text_after = lines[endindex:i]
            context_after = get_context(text_after)
            context_list.append(
                context_before + ' \\ref{' + data['label'] + '} ' +
                context_after
            )
        data['contexts'] = context_list
    # BUG FIX: the docstring promises the updated list back, and the
    # missing-file path already returns []; the original fell off the end
    # and returned None.  Returning the mutated list honours the
    # documented contract while keeping in-place callers working.
    return extracted_image_data
def extract_captions(tex_file, sdir, image_list, primary=True):
    """Extract captions.

    Take the TeX file and the list of images in the tarball (which all,
    presumably, are used in the TeX file) and figure out which captions
    in the text are associated with which images
    :param: lines (list): list of lines of the TeX file
    :param: tex_file (string): the name of the TeX file which mentions
        the images
    :param: sdir (string): path to current sub-directory
    :param: image_list (list): list of images in tarball
    :param: primary (bool): is this the primary call to extract_caption?

    :return: images_and_captions_and_labels ([(string, string, list),
        (string, string, list), ...]):
        a list of tuples representing the names of images and their
        corresponding figure labels from the TeX file
    """
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        return []
    lines = get_lines_from_file(tex_file)
    # possible figure lead-ins
    figure_head = u'\\begin{figure' # also matches figure*
    figure_wrap_head = u'\\begin{wrapfigure'
    figure_tail = u'\\end{figure' # also matches figure*
    figure_wrap_tail = u'\\end{wrapfigure'
    picture_head = u'\\begin{picture}'
    displaymath_head = u'\\begin{displaymath}'
    subfloat_head = u'\\subfloat'
    subfig_head = u'\\subfigure'
    includegraphics_head = u'\\includegraphics'
    epsfig_head = u'\\epsfig'
    input_head = u'\\input'
    # possible caption lead-ins
    caption_head = u'\\caption'
    figcaption_head = u'\\figcaption'
    label_head = u'\\label'
    rotate = u'rotate='
    angle = u'angle='
    eps_tail = u'.eps'
    ps_tail = u'.ps'
    doc_head = u'\\begin{document}'
    doc_tail = u'\\end{document}'
    # accumulator state threaded through the whole scan; cur_image and
    # caption may each be a string, or a [main, [subs...]] pair once
    # subfloats/subfigures are seen
    extracted_image_data = []
    cur_image = ''
    caption = ''
    labels = []
    active_label = ""
    # cut out shit before the doc head
    if primary:
        for line_index in range(len(lines)):
            if lines[line_index].find(doc_head) < 0:
                lines[line_index] = ''
            else:
                break
    # are we using commas in filenames here?
    commas_okay = False
    for dummy1, dummy2, filenames in \
            os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
        for filename in filenames:
            if filename.find(',') > -1:
                commas_okay = True
                break
    # a comment is a % not preceded by a \
    comment = re.compile("(?<!\\\\)%")
    for line_index in range(len(lines)):
        # get rid of pesky comments by splitting where the comment is
        # and keeping only the part before the %
        line = comment.split(lines[line_index])[0]
        line = line.strip()
        lines[line_index] = line
    in_figure_tag = 0
    for line_index in range(len(lines)):
        line = lines[line_index]
        if line == '':
            continue
        if line.find(doc_tail) > -1:
            break
        """
        FIGURE -
        structure of a figure:
        \begin{figure}
        \formatting...
        \includegraphics[someoptions]{FILENAME}
        \caption{CAPTION} %caption and includegraphics may be switched!
        \end{figure}
        """
        index = max([line.find(figure_head), line.find(figure_wrap_head)])
        if index > -1:
            in_figure_tag = 1
            # some punks don't like to put things in the figure tag. so we
            # just want to see if there is anything that is sitting outside
            # of it when we find it
            cur_image, caption, extracted_image_data = put_it_together(
                cur_image, caption,
                active_label, extracted_image_data,
                line_index, lines)
        # here, you jerks, just make it so that it's fecking impossible to
        # figure out your damn inclusion types
        index = max([line.find(eps_tail), line.find(ps_tail),
                     line.find(epsfig_head)])
        if index > -1:
            if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
                ext = True
            else:
                ext = False
            filenames = intelligently_find_filenames(line, ext=ext,
                                                     commas_okay=commas_okay)
            # try to look ahead! sometimes there are better matches after
            if line_index < len(lines) - 1:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 1],
                    commas_okay=commas_okay))
            if line_index < len(lines) - 2:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 2],
                    commas_okay=commas_okay))
            for filename in filenames:
                # NOTE(review): .encode returns bytes on Python 3, so
                # cur_image may hold bytes here while other branches store
                # str — confirm intended only for the Python 2 lineage.
                filename = filename.encode('utf-8', 'ignore')
                if cur_image == '':
                    cur_image = filename
                elif type(cur_image) == list:
                    if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                        cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
                    else:
                        cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
                else:
                    cur_image = ['', [cur_image, filename]]
        """
        Rotate and angle
        """
        index = max(line.find(rotate), line.find(angle))
        if index > -1:
            # which is the image associated to it?
            filenames = intelligently_find_filenames(line,
                                                     commas_okay=commas_okay)
            # try the line after and the line before
            if line_index + 1 < len(lines):
                filenames.extend(intelligently_find_filenames(
                    lines[line_index + 1],
                    commas_okay=commas_okay))
            if line_index > 1:
                filenames.extend(intelligently_find_filenames(
                    lines[line_index - 1],
                    commas_okay=commas_okay))
            already_tried = []
            for filename in filenames:
                if filename != 'ERROR' and filename not in already_tried:
                    if rotate_image(filename, line, sdir, image_list):
                        break
                    already_tried.append(filename)
        """
        INCLUDEGRAPHICS -
        structure of includegraphics:
        \includegraphics[someoptions]{FILENAME}
        """
        index = line.find(includegraphics_head)
        if index > -1:
            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(line_index, index, '{', lines)
            filename = lines[open_curly_line][open_curly + 1:close_curly]
            if cur_image == '':
                cur_image = filename
            elif type(cur_image) == list:
                if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                    cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
                else:
                    cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
            else:
                cur_image = ['', [cur_image, filename]]
        """
        {\input{FILENAME}}
        \caption{CAPTION}
        This input is ambiguous, since input is also used for things like
        inclusion of data from other LaTeX files directly.
        """
        index = line.find(input_head)
        if index > -1:
            new_tex_names = intelligently_find_filenames(
                line, TeX=True,
                commas_okay=commas_okay)
            for new_tex_name in new_tex_names:
                if new_tex_name != 'ERROR':
                    new_tex_file = get_tex_location(new_tex_name, tex_file)
                    if new_tex_file and primary: # to kill recursion
                        extracted_image_data.extend(extract_captions(
                            new_tex_file, sdir,
                            image_list,
                            primary=False
                        ))
        """PICTURE"""
        index = line.find(picture_head)
        if index > -1:
            # structure of a picture:
            # \begin{picture}
            # ....not worrying about this now
            # print('found picture tag')
            # FIXME
            pass
        """DISPLAYMATH"""
        index = line.find(displaymath_head)
        if index > -1:
            # structure of a displaymath:
            # \begin{displaymath}
            # ....not worrying about this now
            # print('found displaymath tag')
            # FIXME
            pass
        """
        CAPTIONS -
        structure of a caption:
        \caption[someoptions]{CAPTION}
        or
        \caption{CAPTION}
        or
        \caption{{options}{CAPTION}}
        """
        index = max([line.find(caption_head), line.find(figcaption_head)])
        if index > -1:
            open_curly, open_curly_line, close_curly, close_curly_line = \
                find_open_and_close_braces(line_index, index, '{', lines)
            cap_begin = open_curly + 1
            cur_caption = assemble_caption(
                open_curly_line, cap_begin,
                close_curly_line, close_curly, lines)
            if caption == '':
                caption = cur_caption
            elif type(caption) == list:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
                else:
                    caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
            elif caption != cur_caption:
                # second distinct caption: promote to [main, [subs]] form
                caption = ['', [caption, cur_caption]]
        """
        SUBFLOATS -
        structure of a subfloat (inside of a figure tag):
        \subfloat[CAPTION]{options{FILENAME}}
        also associated with the overall caption of the enclosing figure
        """
        index = line.find(subfloat_head)
        if index > -1:
            # if we are dealing with subfloats, we need a different
            # sort of structure to keep track of captions and subcaptions
            if not isinstance(cur_image, list):
                cur_image = [cur_image, []]
            if not isinstance(caption, list):
                caption = [caption, []]
            open_square, open_square_line, close_square, close_square_line = \
                find_open_and_close_braces(line_index, index, '[', lines)
            cap_begin = open_square + 1
            sub_caption = assemble_caption(
                open_square_line,
                cap_begin, close_square_line, close_square, lines)
            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(close_square_line,
                                           close_square, '{', lines)
            sub_image = lines[open_curly_line][open_curly + 1:close_curly]
            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
        """
        SUBFIGURES -
        structure of a subfigure (inside a figure tag):
        \subfigure[CAPTION]{
        \includegraphics[options]{FILENAME}}
        also associated with the overall caption of the enclosing figure
        """
        index = line.find(subfig_head)
        if index > -1:
            # like with subfloats, we need a different structure for keepin
            # track of this stuff
            if type(cur_image) != list:
                cur_image = [cur_image, []]
            if type(caption) != list:
                caption = [caption, []]
            open_square, open_square_line, close_square, close_square_line = \
                find_open_and_close_braces(line_index, index, '[', lines)
            cap_begin = open_square + 1
            sub_caption = assemble_caption(open_square_line,
                                           cap_begin, close_square_line,
                                           close_square, lines)
            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
            index_cpy = index
            # find the graphics tag to get the filename
            # it is okay if we eat lines here
            index = line.find(includegraphics_head)
            while index == -1 and (line_index + 1) < len(lines):
                line_index += 1
                line = lines[line_index]
                index = line.find(includegraphics_head)
            # NOTE(review): the while-loop above leaves
            # line_index <= len(lines) - 1, so this check can never fire
            # (suspected off-by-one); index_cpy is also a column offset,
            # not a line number — confirm against upstream history.
            if line_index == len(lines):
                # didn't find the image name on line
                line_index = index_cpy
            open_curly, open_curly_line, close_curly, dummy = \
                find_open_and_close_braces(line_index,
                                           index, '{', lines)
            sub_image = lines[open_curly_line][open_curly + 1:close_curly]
            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
        """
        LABELS -
        structure of a label:
        \label{somelabelnamewhichprobablyincludesacolon}
        Labels are used to tag images and will later be used in ref tags
        to reference them. This is interesting because in effect the refs
        to a plot are additional caption for it.
        Notes: labels can be used for many more things than just plots.
        We'll have to experiment with how to best associate a label with an
        image.. if it's in the caption, it's easy. If it's in a figure, it's
        still okay... but the images that aren't in figure tags are numerous.
        """
        index = line.find(label_head)
        if index > -1 and in_figure_tag:
            open_curly, open_curly_line, close_curly, dummy =\
                find_open_and_close_braces(line_index,
                                           index, '{', lines)
            label = lines[open_curly_line][open_curly + 1:close_curly]
            if label not in labels:
                active_label = label
                labels.append(label)
        """
        FIGURE
        important: we put the check for the end of the figure at the end
        of the loop in case some pathological person puts everything in one
        line
        """
        index = max([
            line.find(figure_tail),
            line.find(figure_wrap_tail),
            line.find(doc_tail)
        ])
        if index > -1:
            in_figure_tag = 0
            # flush the accumulated image/caption pair(s) for this figure
            cur_image, caption, extracted_image_data = \
                put_it_together(cur_image, caption, active_label,
                                extracted_image_data,
                                line_index, lines)
        """
        END DOCUMENT
        we shouldn't look at anything after the end document tag is found
        """
        index = line.find(doc_tail)
        if index > -1:
            break
    return extracted_image_data
def put_it_together(cur_image, caption, context, extracted_image_data,
                    line_index, lines):
    """Put it together.

    Takes the current image(s) and caption(s) and assembles them into
    something useful in the extracted_image_data list.

    :param: cur_image (string || list): the image currently being dealt with,
        or the list of images, in the case of subimages
    :param: caption (string || list): the caption or captions currently in
        scope
    :param: context (string): the active label associated with the figure;
        stored as the third element of every emitted tuple
    :param: extracted_image_data ([(string, string), (string, string), ...]):
        a list of tuples of images matched to captions from this document.
    :param: line_index (int): the index where we are in the lines (for
        searchback and searchforward purposes)
    :param: lines ([string, string, ...]): the lines in the TeX

    :return: (cur_image, caption, extracted_image_data): the same arguments it
        was sent, processed appropriately
    """
    if type(cur_image) == list:
        # drop placeholder 'ERROR' entries before pairing anything up
        if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':
            cur_image[MAIN_CAPTION_OR_IMAGE] = ''
        for image in cur_image[SUB_CAPTION_OR_IMAGE]:
            if image == 'ERROR':
                cur_image[SUB_CAPTION_OR_IMAGE].remove(image)
    if cur_image != '' and caption != '':
        if type(cur_image) == list and type(caption) == list:
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\
                    caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE],
                     caption[MAIN_CAPTION_OR_IMAGE],
                     context))
            if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list:
                # why is the main image a list?
                # it's a good idea to attach the main caption to other
                # things, but the main image can only be used once
                cur_image[MAIN_CAPTION_OR_IMAGE] = ''
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    for index in \
                            range(len(cur_image[SUB_CAPTION_OR_IMAGE])):
                        if index < len(caption[SUB_CAPTION_OR_IMAGE]):
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                caption[SUB_CAPTION_OR_IMAGE][index]
                        else:
                            long_caption = \
                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
                                'Caption not extracted'
                        extracted_image_data.append(
                            (cur_image[SUB_CAPTION_OR_IMAGE][index],
                             long_caption, context))
                else:
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \
                        ' : ' + caption[SUB_CAPTION_OR_IMAGE]
                    for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_image, long_caption, context))
            else:
                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                    long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                    for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:
                        long_caption = long_caption + ' : ' + sub_cap
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE], long_caption,
                         context))
                else:
                    # wtf are they lists for?
                    extracted_image_data.append(
                        (cur_image[SUB_CAPTION_OR_IMAGE],
                         caption[SUB_CAPTION_OR_IMAGE], context))
        elif type(cur_image) == list:
            if cur_image[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
                for image in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append((image, caption, context))
            else:
                extracted_image_data.append(
                    (cur_image[SUB_CAPTION_OR_IMAGE], caption, context))
        elif type(caption) == list:
            if caption[MAIN_CAPTION_OR_IMAGE] != '':
                extracted_image_data.append(
                    (cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))
            if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
                # multiple caps for one image:
                long_caption = caption[MAIN_CAPTION_OR_IMAGE]
                for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                    if long_caption != '':
                        long_caption += ' : '
                    long_caption += subcap
                extracted_image_data.append((cur_image, long_caption, context))
            else:
                # BUG FIX: this tuple was written with a '.' instead of a
                # comma (`caption[SUB_CAPTION_OR_IMAGE]. context`), which
                # raised AttributeError whenever this branch was reached.
                extracted_image_data.append(
                    (cur_image, caption[SUB_CAPTION_OR_IMAGE], context))
        else:
            extracted_image_data.append((cur_image, caption, context))
    elif cur_image != '' and caption == '':
        # we may have missed the caption somewhere.
        REASONABLE_SEARCHBACK = 25
        REASONABLE_SEARCHFORWARD = 5
        curly_no_tag_preceding = '(?<!\\w){'
        for searchback in range(REASONABLE_SEARCHBACK):
            if line_index - searchback < 0:
                continue
            back_line = lines[line_index - searchback]
            m = re.search(curly_no_tag_preceding, back_line)
            if m:
                open_curly = m.start()
                open_curly, open_curly_line, close_curly, \
                    close_curly_line = find_open_and_close_braces(
                        line_index - searchback, open_curly, '{', lines)
                cap_begin = open_curly + 1
                caption = assemble_caption(open_curly_line, cap_begin,
                                           close_curly_line, close_curly,
                                           lines)
                if type(cur_image) == list:
                    extracted_image_data.append(
                        (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
                    for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                        extracted_image_data.append(
                            (sub_img, caption, context))
                else:
                    extracted_image_data.append((cur_image, caption, context))
                break
        if caption == '':
            for searchforward in range(REASONABLE_SEARCHFORWARD):
                if line_index + searchforward >= len(lines):
                    break
                fwd_line = lines[line_index + searchforward]
                m = re.search(curly_no_tag_preceding, fwd_line)
                if m:
                    open_curly = m.start()
                    open_curly, open_curly_line, close_curly,\
                        close_curly_line = find_open_and_close_braces(
                            line_index + searchforward, open_curly, '{', lines)
                    cap_begin = open_curly + 1
                    caption = assemble_caption(open_curly_line,
                                               cap_begin, close_curly_line,
                                               close_curly, lines)
                    if type(cur_image) == list:
                        extracted_image_data.append(
                            (cur_image[MAIN_CAPTION_OR_IMAGE],
                             caption, context))
                        for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                            extracted_image_data.append(
                                (sub_img, caption, context))
                    else:
                        extracted_image_data.append(
                            (cur_image, caption, context))
                    break
        if caption == '':
            if type(cur_image) == list:
                extracted_image_data.append(
                    (cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found',
                     context))
                for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                    extracted_image_data.append(
                        (sub_img, 'No caption', context))
            else:
                extracted_image_data.append(
                    (cur_image, 'No caption found', context))
    elif caption != '' and cur_image == '':
        if type(caption) == list:
            long_caption = caption[MAIN_CAPTION_OR_IMAGE]
            for subcap in caption[SUB_CAPTION_OR_IMAGE]:
                long_caption = long_caption + ': ' + subcap
        else:
            long_caption = caption
        # 'noimg' marks a caption that never found its image
        extracted_image_data.append(('', 'noimg' + long_caption, context))
    # if we're leaving the figure, no sense keeping the data
    cur_image = ''
    caption = ''
    return cur_image, caption, extracted_image_data
def intelligently_find_filenames(line, TeX=False, ext=False,
                                 commas_okay=False):
    """Intelligently find filenames.

    Find the filename in the line. We don't support all filenames! Just eps
    and ps for now.

    :param: line (string): the line we want to get a filename out of
    :param: TeX (bool): also accept TeX-ish suffixes on candidates
    :param: ext (bool): require an .eps/.ps-style extension on candidates
    :param: commas_okay (bool): allow ',' inside candidate filenames

    :return: filename ([string, ...]): what is probably the name of the
        file(s); ``['ERROR']`` when nothing at all matched
    """
    files_included = ['ERROR']
    if commas_okay:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.,%#]+'
    else:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.%#]+'
    if ext:
        valid_for_filename += '\.e*ps[texfi2]*'
    if TeX:
        valid_for_filename += '[\.latex]*'
    file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)
    if len(file_inclusion) > 0:
        # right now it looks like '=FILENAME,' or '=FILENAME '
        for file_included in file_inclusion:
            files_included.append(file_included[1:-1])
    file_inclusion = re.findall('(?:[ps]*file=|figure=)' +
                                valid_for_filename + '[,\\]} ]*', line)
    if len(file_inclusion) > 0:
        # still has the =
        for file_included in file_inclusion:
            part_before_equals = file_included.split('=')[0]
            # BUG FIX: compared an int against a string (always true);
            # compare the lengths as intended.
            if len(part_before_equals) != len(file_included):
                file_included = file_included[
                    len(part_before_equals) + 1:].strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall(
        '["\'{\\[]' + valid_for_filename + '[}\\],"\']',
        line)
    if len(file_inclusion) > 0:
        # right now it's got the {} or [] or "" or '' around it still
        for file_included in file_inclusion:
            file_included = file_included[1:-1]
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall('^' + valid_for_filename + '$', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall('^' + valid_for_filename + '[,\\} $]', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    file_inclusion = re.findall('\\s*' + valid_for_filename + '\\s*$', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)
    if files_included != ['ERROR']:
        files_included = files_included[1:]  # cut off the dummy
    # Clean up: drop empty entries, then split multi-file entries on
    # spaces and commas.  Iterate by index so parts appended during the
    # loop are themselves examined (a space-separated chunk may still
    # contain commas).  BUG FIXES: the original removed/appended while
    # iterating the same list (skipping elements), and the comma branch
    # split on ' ' so comma-joined names were never separated.
    files_included = [name for name in files_included if name != '']
    position = 0
    while position < len(files_included):
        name = files_included[position]
        if ' ' in name:
            for subfile in name.split(' '):
                if subfile and subfile not in files_included:
                    files_included.append(subfile)
        if ',' in name:
            for subfile in name.split(','):
                if subfile and subfile not in files_included:
                    files_included.append(subfile)
        position += 1
    return files_included
|
inspirehep/plotextractor | plotextractor/output_utils.py | find_open_and_close_braces | python | def find_open_and_close_braces(line_index, start, brace, lines):
if brace in ['[', ']']:
open_brace = '['
close_brace = ']'
elif brace in ['{', '}']:
open_brace = '{'
close_brace = '}'
elif brace in ['(', ')']:
open_brace = '('
close_brace = ')'
else:
# unacceptable brace type!
return (-1, -1, -1, -1)
open_braces = []
line = lines[line_index]
ret_open_index = line.find(open_brace, start)
line_index_cpy = line_index
# sometimes people don't put the braces on the same line
# as the tag
while ret_open_index == -1:
line_index = line_index + 1
if line_index >= len(lines):
# failed to find open braces...
return (0, line_index_cpy, 0, line_index_cpy)
line = lines[line_index]
ret_open_index = line.find(open_brace)
open_braces.append(open_brace)
ret_open_line = line_index
open_index = ret_open_index
close_index = ret_open_index
while len(open_braces) > 0:
if open_index == -1 and close_index == -1:
# we hit the end of the line! oh, noez!
line_index = line_index + 1
if line_index >= len(lines):
# hanging braces!
return (ret_open_index, ret_open_line,
ret_open_index, ret_open_line)
line = lines[line_index]
# to not skip things that are at the beginning of the line
close_index = line.find(close_brace)
open_index = line.find(open_brace)
else:
if close_index != -1:
close_index = line.find(close_brace, close_index + 1)
if open_index != -1:
open_index = line.find(open_brace, open_index + 1)
if close_index != -1:
open_braces.pop()
if len(open_braces) == 0 and \
(open_index > close_index or open_index == -1):
break
if open_index != -1:
open_braces.append(open_brace)
ret_close_index = close_index
return (ret_open_index, ret_open_line, ret_close_index, line_index) | Take the line where we want to start and the index where we want to start
and find the first instance of matched open and close braces of the same
type as brace in file file.
:param: line (int): the index of the line we want to start searching at
:param: start (int): the index in the line we want to start searching at
:param: brace (string): one of the type of brace we are looking for ({, },
[, or ])
:param lines ([string, string, ...]): the array of lines in the file we
are looking in.
:return: (start, start_line, end, end_line): (int, int, int): the index
of the start and end of whatever braces we are looking for, and the
line number that the end is on (since it may be different than the line
we started on) | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/output_utils.py#L31-L116 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import os
import re
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
    """
    Take the caption of a picture and put it all together
    in a nice way. If it spans multiple lines, put it on one line. If it
    contains controlled characters, strip them out. If it has tags we don't
    want to worry about, get rid of them, etc.

    :param: begin_line (int): the index of the line where the caption begins
    :param: begin_index (int): the index within the line where the caption
        begins
    :param: end_line (int): the index of the line where the caption ends
    :param: end_index (int): the index within the line where the caption ends
    :param: lines ([string, string, ...]): the line strings of the text

    :return: caption (string): the caption, formatted and pieced together
    """
    # stuff we don't like
    label_head = '\\label{'
    # reassemble that sucker
    if end_line > begin_line:
        # our caption spanned multiple lines
        caption = lines[begin_line][begin_index:]
        for included_line_index in range(begin_line + 1, end_line):
            caption = caption + ' ' + lines[included_line_index]
        caption = caption + ' ' + lines[end_line][:end_index]
        caption = caption.replace('\n', ' ')
        # joining lines with ' ' above can leave doubled-up spaces;
        # collapse them (the previous replace(' ', ' ') was a no-op)
        caption = caption.replace('  ', ' ')
    else:
        # it fit on one line
        caption = lines[begin_line][begin_index:end_index]
    # clean out a label tag, if there is one
    label_begin = caption.find(label_head)
    if label_begin > -1:
        # we know that our caption is only one line, so if there's a label
        # tag in it, it will be all on one line. so we make up some args
        dummy_start, dummy_start_line, label_end, dummy_end = \
            find_open_and_close_braces(0, label_begin, '{', [caption])
        caption = caption[:label_begin] + caption[label_end + 1:]
    caption = caption.strip()
    # strip a single pair of braces wrapping the whole caption
    if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
        caption = caption[1:-1]
    return caption
def prepare_image_data(extracted_image_data, output_directory,
                       image_mapping):
    """Prepare and clean image-data from duplicates and other garbage.

    :param: extracted_image_data ([(string, string, list, list) ...],
        ...])): the images and their captions + contexts, ordered
    :param: output_directory (string): the directory where everything was
        unzipped to (used to locate images and derive their names)
    :param: image_mapping (dict): maps extracted image paths to their
        original URLs; its keys are the candidate image files

    :return: the image records, one dict per unique image location,
        with duplicate captions merged
    """
    images_by_location = {}
    for image, caption, label in extracted_image_data:
        if not image or image == 'ERROR':
            # nothing usable was extracted for this entry
            continue

        location = get_image_location(
            image,
            output_directory,
            image_mapping.keys()
        )
        if not location or not os.path.exists(location) or \
                len(location) < 3:
            continue

        location = os.path.normpath(location)
        entry = images_by_location.get(location)
        if entry is None:
            # first time we see this image: build its record
            images_by_location[location] = dict(
                url=location,
                original_url=image_mapping[location],
                captions=[caption],
                label=label,
                name=get_name_from_path(location, output_directory),
            )
        elif caption not in entry['captions']:
            # same image seen again with a new caption: merge it in
            entry['captions'].append(caption)

    return images_by_location.values()
def get_image_location(image, sdir, image_list, recurred=False):
    """Take a raw image name + directory and return the location of image.

    Tries, in order: an exact match against ``image_list`` (relative to
    ``sdir``), a handful of conventional sub-directories, loose files one
    directory level down inside ``sdir``, up to two parent directories of
    ``sdir``, and finally each fragment of ``image`` split on space, comma
    and equals.

    :param: image (string): the name of the raw image from the TeX
    :param: sdir (string): the directory where everything was unzipped to
    :param: image_list ([string, string, ...]): the list of images that
        were extracted from the tarball and possibly converted
    :param: recurred (bool): internal flag; True when retrying on a
        fragment of ``image``, which disables further recursion

    :return: converted_image (string): the full path to the (possibly
        converted) image file, or None if nothing matched
    """
    if isinstance(image, list):
        # image is a list, not good
        return None
    # NOTE(review): encode() returns bytes on Python 3, so the str-pattern
    # regex/string calls below would raise TypeError -- this looks like
    # Python 2 era code; confirm before running under Python 3.
    image = image.encode('utf-8', 'ignore')
    image = image.strip()
    # strip a "figure=" or "file=" key prefix, if present
    figure_or_file = '(figure=|file=)'
    figure_or_file_in_image = re.findall(figure_or_file, image)
    if len(figure_or_file_in_image) > 0:
        image = image.replace(figure_or_file_in_image[0], '')
    # unwrap an \includegraphics{...} argument
    includegraphics = r'\\includegraphics{(.+)}'
    includegraphics_in_image = re.findall(includegraphics, image)
    if len(includegraphics_in_image) > 0:
        image = includegraphics_in_image[0]
    image = image.strip()
    # matches a leading TeX control word plus the space after it, "\word "
    some_kind_of_tag = '\\\\\\w+ '
    if image.startswith('./'):
        image = image[2:]
    if re.match(some_kind_of_tag, image):
        # drop the control word and the separating space
        image = image[len(image.split(' ')[0]) + 1:]
    if image.startswith('='):
        image = image[1:]
    # a single leftover character cannot be a real filename
    if len(image) == 1:
        return None
    image = image.strip()
    converted_image_should_be = get_converted_image_name(image)
    if image_list is None:
        image_list = os.listdir(sdir)
    # exact match against the known images, compared relative to sdir
    for png_image in image_list:
        png_image_rel = os.path.relpath(png_image, start=sdir)
        if converted_image_should_be == png_image_rel:
            return png_image
    # maybe it's in a subfolder (TeX just understands that)
    for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
        if os.path.isdir(os.path.join(sdir, prefix)):
            image_list = os.listdir(os.path.join(sdir, prefix))
            for png_image in image_list:
                if converted_image_should_be == png_image:
                    return os.path.join(sdir, prefix, png_image)
    # maybe it is actually just loose.
    for png_image in os.listdir(sdir):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
        if os.path.isdir(os.path.join(sdir, png_image)):
            # try that, too! we just do two levels, because that's all that's
            # reasonable..
            sub_dir = os.path.join(sdir, png_image)
            for sub_dir_file in os.listdir(sub_dir):
                if os.path.split(converted_image_should_be)[-1] == sub_dir_file: # noqa
                    return os.path.join(sub_dir, converted_image_should_be)
    # maybe it's actually up a directory or two: this happens in nested
    # tarballs where the TeX is stored in a different directory from the images
    for png_image in os.listdir(os.path.split(sdir)[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    if recurred:
        # already retrying on a fragment; give up rather than recurse again
        return None
    # agh, this calls for drastic measures
    for piece in image.split(' '):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split(','):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split('='):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    return None
def get_converted_image_name(image):
    """Return the name of the image after it has been converted to png format.

    Strips off the old extension.

    :param: image (string): The fullpath of the image before conversion

    :return: converted_image (string): the fullpath of the image after convert
    """
    png_extension = '.png'
    if image.endswith(png_extension):
        # it already ends in png! we're golden
        return image

    img_dir, filename = os.path.split(image)

    name_parts = filename.split('.')
    if len(name_parts) > 1:
        # replace the trailing extension with .png
        stem_length = len(filename) - (len(name_parts[-1]) + 1)
        converted_image = filename[:stem_length] + png_extension
    else:
        # no extension... damn
        converted_image = filename + png_extension

    return os.path.join(img_dir, converted_image)
def get_tex_location(new_tex_name, current_tex_name, recurred=False):
    """
    Take the name of a TeX file and attempt to match it to an actual file
    in the tarball.

    :param: new_tex_name (string): the name of the TeX file to find
    :param: current_tex_name (string): the location of the TeX file where we
        found the reference
    :param: recurred (bool): internal flag; True when retrying with a
        '.tex' extension appended

    :return: tex_location (string): the location of the other TeX file on
        disk or None if it is not found
    """
    current_dir = os.path.split(current_tex_name)[0]
    # matches a leading TeX control word plus the space after it, "\word "
    some_kind_of_tag = '\\\\\\w+ '

    # normalise the referenced name: drop an "input" prefix, a control
    # word, and a leading "./"
    new_tex_name = new_tex_name.strip()
    if new_tex_name.startswith('input'):
        new_tex_name = new_tex_name[len('input'):]
    if re.match(some_kind_of_tag, new_tex_name):
        new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:]
    if new_tex_name.startswith('./'):
        new_tex_name = new_tex_name[2:]
    if len(new_tex_name) == 0:
        return None
    new_tex_name = new_tex_name.strip()

    new_tex_file = os.path.split(new_tex_name)[-1]
    new_tex_folder = os.path.split(new_tex_name)[0]
    if new_tex_folder == new_tex_file:
        new_tex_folder = ''

    # could be a plain file in the current directory
    if new_tex_file in os.listdir(current_dir):
        return os.path.join(current_dir, new_tex_file)

    # otherwise look inside <base>/<referenced folder> for the current
    # directory and up to two parents (nested-tarball layouts)
    parent_dir = os.path.split(current_dir)[0]
    grandparent_dir = os.path.split(parent_dir)[0]
    for base_dir in (current_dir, parent_dir, grandparent_dir):
        candidate_dir = os.path.join(base_dir, new_tex_folder)
        if os.path.isdir(candidate_dir):
            if new_tex_file in os.listdir(candidate_dir):
                return os.path.join(candidate_dir, new_tex_file)

    # last resort: maybe the '.tex' extension was simply omitted
    if not recurred:
        return get_tex_location(new_tex_name + '.tex', current_tex_name,
                                recurred=True)
    return None
def get_name_from_path(full_path, root_path):
    """Create a filename by merging path after root directory."""
    relative = os.path.relpath(full_path, root_path)
    # drop the extension, then flatten separators and strip odd characters
    stem = "_".join(relative.split('.')[:-1])
    return stem.replace('/', '_').replace(';', '').replace(':', '')
|
inspirehep/plotextractor | plotextractor/output_utils.py | assemble_caption | python | def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
# stuff we don't like
label_head = '\\label{'
# reassemble that sucker
if end_line > begin_line:
# our caption spanned multiple lines
caption = lines[begin_line][begin_index:]
for included_line_index in range(begin_line + 1, end_line):
caption = caption + ' ' + lines[included_line_index]
caption = caption + ' ' + lines[end_line][:end_index]
caption = caption.replace('\n', ' ')
caption = caption.replace(' ', ' ')
else:
# it fit on one line
caption = lines[begin_line][begin_index:end_index]
# clean out a label tag, if there is one
label_begin = caption.find(label_head)
if label_begin > -1:
# we know that our caption is only one line, so if there's a label
# tag in it, it will be all on one line. so we make up some args
dummy_start, dummy_start_line, label_end, dummy_end = \
find_open_and_close_braces(0, label_begin, '{', [caption])
caption = caption[:label_begin] + caption[label_end + 1:]
caption = caption.strip()
if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
caption = caption[1:-1]
return caption | Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/output_utils.py#L119-L168 | [
"def find_open_and_close_braces(line_index, start, brace, lines):\n \"\"\"\n Take the line where we want to start and the index where we want to start\n and find the first instance of matched open and close braces of the same\n type as brace in file file.\n\n :param: line (int): the index of the line we want to start searching at\n :param: start (int): the index in the line we want to start searching at\n :param: brace (string): one of the type of brace we are looking for ({, },\n [, or ])\n :param lines ([string, string, ...]): the array of lines in the file we\n are looking in.\n\n :return: (start, start_line, end, end_line): (int, int, int): the index\n of the start and end of whatever braces we are looking for, and the\n line number that the end is on (since it may be different than the line\n we started on)\n \"\"\"\n\n if brace in ['[', ']']:\n open_brace = '['\n close_brace = ']'\n elif brace in ['{', '}']:\n open_brace = '{'\n close_brace = '}'\n elif brace in ['(', ')']:\n open_brace = '('\n close_brace = ')'\n else:\n # unacceptable brace type!\n return (-1, -1, -1, -1)\n\n open_braces = []\n line = lines[line_index]\n\n ret_open_index = line.find(open_brace, start)\n line_index_cpy = line_index\n # sometimes people don't put the braces on the same line\n # as the tag\n while ret_open_index == -1:\n line_index = line_index + 1\n if line_index >= len(lines):\n # failed to find open braces...\n return (0, line_index_cpy, 0, line_index_cpy)\n line = lines[line_index]\n ret_open_index = line.find(open_brace)\n\n open_braces.append(open_brace)\n\n ret_open_line = line_index\n\n open_index = ret_open_index\n close_index = ret_open_index\n\n while len(open_braces) > 0:\n if open_index == -1 and close_index == -1:\n # we hit the end of the line! 
oh, noez!\n line_index = line_index + 1\n\n if line_index >= len(lines):\n # hanging braces!\n return (ret_open_index, ret_open_line,\n ret_open_index, ret_open_line)\n\n line = lines[line_index]\n # to not skip things that are at the beginning of the line\n close_index = line.find(close_brace)\n open_index = line.find(open_brace)\n\n else:\n if close_index != -1:\n close_index = line.find(close_brace, close_index + 1)\n if open_index != -1:\n open_index = line.find(open_brace, open_index + 1)\n\n if close_index != -1:\n open_braces.pop()\n if len(open_braces) == 0 and \\\n (open_index > close_index or open_index == -1):\n break\n if open_index != -1:\n open_braces.append(open_brace)\n\n ret_close_index = close_index\n\n return (ret_open_index, ret_open_line, ret_close_index, line_index)\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import os
import re
def find_open_and_close_braces(line_index, start, brace, lines):
    """
    Take the line where we want to start and the index where we want to start
    and find the first instance of matched open and close braces of the same
    type as brace in file file.

    :param: line_index (int): the index of the line we want to start
        searching at
    :param: start (int): the index in the line we want to start searching at
    :param: brace (string): one of the type of brace we are looking for ({, },
        [, or ])
    :param lines ([string, string, ...]): the array of lines in the file we
        are looking in.

    :return: (start, start_line, end, end_line): (int, int, int, int): the
        index of the start and end of whatever braces we are looking for,
        and the line numbers they sit on (the close may be on a later line
        than the open).  Degenerate tuples mark failure: (-1, -1, -1, -1)
        for an unknown brace type, (0, line, 0, line) when no open brace is
        found, and the open-brace position repeated when it is never closed.
    """
    # normalise the requested brace to its open/close pair
    if brace in ['[', ']']:
        open_brace = '['
        close_brace = ']'
    elif brace in ['{', '}']:
        open_brace = '{'
        close_brace = '}'
    elif brace in ['(', ')']:
        open_brace = '('
        close_brace = ')'
    else:
        # unacceptable brace type!
        return (-1, -1, -1, -1)
    # stack of currently-unmatched open braces
    open_braces = []
    line = lines[line_index]
    ret_open_index = line.find(open_brace, start)
    line_index_cpy = line_index
    # sometimes people don't put the braces on the same line
    # as the tag
    while ret_open_index == -1:
        line_index = line_index + 1
        if line_index >= len(lines):
            # failed to find open braces...
            return (0, line_index_cpy, 0, line_index_cpy)
        line = lines[line_index]
        ret_open_index = line.find(open_brace)
    open_braces.append(open_brace)
    ret_open_line = line_index
    # both scan cursors start at the opening brace we just found
    open_index = ret_open_index
    close_index = ret_open_index
    while len(open_braces) > 0:
        if open_index == -1 and close_index == -1:
            # we hit the end of the line! oh, noez!
            line_index = line_index + 1
            if line_index >= len(lines):
                # hanging braces!
                return (ret_open_index, ret_open_line,
                        ret_open_index, ret_open_line)
            line = lines[line_index]
            # to not skip things that are at the beginning of the line
            close_index = line.find(close_brace)
            open_index = line.find(open_brace)
        else:
            # advance each cursor past the position it last pointed at
            if close_index != -1:
                close_index = line.find(close_brace, close_index + 1)
            if open_index != -1:
                open_index = line.find(open_brace, open_index + 1)
            if close_index != -1:
                # a close brace pops the most recent open brace...
                open_braces.pop()
                # ...and if that empties the stack with no earlier open
                # brace pending on this line, we found the match
                if len(open_braces) == 0 and \
                        (open_index > close_index or open_index == -1):
                    break
                if open_index != -1:
                    open_braces.append(open_brace)
    ret_close_index = close_index
    return (ret_open_index, ret_open_line, ret_close_index, line_index)
def prepare_image_data(extracted_image_data, output_directory,
                       image_mapping):
    """Prepare and clean image-data from duplicates and other garbage.

    :param: extracted_image_data ([(string, string, list, list) ...],
        ...])): the images and their captions + contexts, ordered
    :param: output_directory (string): the directory where everything was
        unzipped to (used to locate images and derive their names)
    :param: image_mapping (dict): maps extracted image paths to their
        original URLs; its keys are the candidate image files

    :return: the image records, one dict per unique image location,
        with duplicate captions merged
    """
    images_by_location = {}
    for image, caption, label in extracted_image_data:
        if not image or image == 'ERROR':
            # nothing usable was extracted for this entry
            continue

        location = get_image_location(
            image,
            output_directory,
            image_mapping.keys()
        )
        if not location or not os.path.exists(location) or \
                len(location) < 3:
            continue

        location = os.path.normpath(location)
        entry = images_by_location.get(location)
        if entry is None:
            # first time we see this image: build its record
            images_by_location[location] = dict(
                url=location,
                original_url=image_mapping[location],
                captions=[caption],
                label=label,
                name=get_name_from_path(location, output_directory),
            )
        elif caption not in entry['captions']:
            # same image seen again with a new caption: merge it in
            entry['captions'].append(caption)

    return images_by_location.values()
def get_image_location(image, sdir, image_list, recurred=False):
    """Take a raw image name + directory and return the location of image.

    Tries, in order: an exact match against ``image_list`` (relative to
    ``sdir``), a handful of conventional sub-directories, loose files one
    directory level down inside ``sdir``, up to two parent directories of
    ``sdir``, and finally each fragment of ``image`` split on space, comma
    and equals.

    :param: image (string): the name of the raw image from the TeX
    :param: sdir (string): the directory where everything was unzipped to
    :param: image_list ([string, string, ...]): the list of images that
        were extracted from the tarball and possibly converted
    :param: recurred (bool): internal flag; True when retrying on a
        fragment of ``image``, which disables further recursion

    :return: converted_image (string): the full path to the (possibly
        converted) image file, or None if nothing matched
    """
    if isinstance(image, list):
        # image is a list, not good
        return None
    # NOTE(review): encode() returns bytes on Python 3, so the str-pattern
    # regex/string calls below would raise TypeError -- this looks like
    # Python 2 era code; confirm before running under Python 3.
    image = image.encode('utf-8', 'ignore')
    image = image.strip()
    # strip a "figure=" or "file=" key prefix, if present
    figure_or_file = '(figure=|file=)'
    figure_or_file_in_image = re.findall(figure_or_file, image)
    if len(figure_or_file_in_image) > 0:
        image = image.replace(figure_or_file_in_image[0], '')
    # unwrap an \includegraphics{...} argument
    includegraphics = r'\\includegraphics{(.+)}'
    includegraphics_in_image = re.findall(includegraphics, image)
    if len(includegraphics_in_image) > 0:
        image = includegraphics_in_image[0]
    image = image.strip()
    # matches a leading TeX control word plus the space after it, "\word "
    some_kind_of_tag = '\\\\\\w+ '
    if image.startswith('./'):
        image = image[2:]
    if re.match(some_kind_of_tag, image):
        # drop the control word and the separating space
        image = image[len(image.split(' ')[0]) + 1:]
    if image.startswith('='):
        image = image[1:]
    # a single leftover character cannot be a real filename
    if len(image) == 1:
        return None
    image = image.strip()
    converted_image_should_be = get_converted_image_name(image)
    if image_list is None:
        image_list = os.listdir(sdir)
    # exact match against the known images, compared relative to sdir
    for png_image in image_list:
        png_image_rel = os.path.relpath(png_image, start=sdir)
        if converted_image_should_be == png_image_rel:
            return png_image
    # maybe it's in a subfolder (TeX just understands that)
    for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
        if os.path.isdir(os.path.join(sdir, prefix)):
            image_list = os.listdir(os.path.join(sdir, prefix))
            for png_image in image_list:
                if converted_image_should_be == png_image:
                    return os.path.join(sdir, prefix, png_image)
    # maybe it is actually just loose.
    for png_image in os.listdir(sdir):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
        if os.path.isdir(os.path.join(sdir, png_image)):
            # try that, too! we just do two levels, because that's all that's
            # reasonable..
            sub_dir = os.path.join(sdir, png_image)
            for sub_dir_file in os.listdir(sub_dir):
                if os.path.split(converted_image_should_be)[-1] == sub_dir_file: # noqa
                    return os.path.join(sub_dir, converted_image_should_be)
    # maybe it's actually up a directory or two: this happens in nested
    # tarballs where the TeX is stored in a different directory from the images
    for png_image in os.listdir(os.path.split(sdir)[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    if recurred:
        # already retrying on a fragment; give up rather than recurse again
        return None
    # agh, this calls for drastic measures
    for piece in image.split(' '):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split(','):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split('='):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    return None
def get_converted_image_name(image):
    """Return the name of the image after it has been converted to png format.

    Strips off the old extension.

    :param: image (string): The fullpath of the image before conversion

    :return: converted_image (string): the fullpath of the image after convert
    """
    png_extension = '.png'
    if image.endswith(png_extension):
        # it already ends in png! we're golden
        return image

    img_dir, filename = os.path.split(image)

    name_parts = filename.split('.')
    if len(name_parts) > 1:
        # replace the trailing extension with .png
        stem_length = len(filename) - (len(name_parts[-1]) + 1)
        converted_image = filename[:stem_length] + png_extension
    else:
        # no extension... damn
        converted_image = filename + png_extension

    return os.path.join(img_dir, converted_image)
def get_tex_location(new_tex_name, current_tex_name, recurred=False):
    """
    Take the name of a TeX file and attempt to match it to an actual file
    in the tarball.

    :param: new_tex_name (string): the name of the TeX file to find
    :param: current_tex_name (string): the location of the TeX file where we
        found the reference
    :param: recurred (bool): internal flag; True when retrying with a
        '.tex' extension appended

    :return: tex_location (string): the location of the other TeX file on
        disk or None if it is not found
    """
    current_dir = os.path.split(current_tex_name)[0]
    # matches a leading TeX control word plus the space after it, "\word "
    some_kind_of_tag = '\\\\\\w+ '

    # normalise the referenced name: drop an "input" prefix, a control
    # word, and a leading "./"
    new_tex_name = new_tex_name.strip()
    if new_tex_name.startswith('input'):
        new_tex_name = new_tex_name[len('input'):]
    if re.match(some_kind_of_tag, new_tex_name):
        new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:]
    if new_tex_name.startswith('./'):
        new_tex_name = new_tex_name[2:]
    if len(new_tex_name) == 0:
        return None
    new_tex_name = new_tex_name.strip()

    new_tex_file = os.path.split(new_tex_name)[-1]
    new_tex_folder = os.path.split(new_tex_name)[0]
    if new_tex_folder == new_tex_file:
        new_tex_folder = ''

    # could be a plain file in the current directory
    if new_tex_file in os.listdir(current_dir):
        return os.path.join(current_dir, new_tex_file)

    # otherwise look inside <base>/<referenced folder> for the current
    # directory and up to two parents (nested-tarball layouts)
    parent_dir = os.path.split(current_dir)[0]
    grandparent_dir = os.path.split(parent_dir)[0]
    for base_dir in (current_dir, parent_dir, grandparent_dir):
        candidate_dir = os.path.join(base_dir, new_tex_folder)
        if os.path.isdir(candidate_dir):
            if new_tex_file in os.listdir(candidate_dir):
                return os.path.join(candidate_dir, new_tex_file)

    # last resort: maybe the '.tex' extension was simply omitted
    if not recurred:
        return get_tex_location(new_tex_name + '.tex', current_tex_name,
                                recurred=True)
    return None
def get_name_from_path(full_path, root_path):
    """Create a filename by merging path after root directory."""
    relative = os.path.relpath(full_path, root_path)
    # drop the extension, then flatten separators and strip odd characters
    stem = "_".join(relative.split('.')[:-1])
    return stem.replace('/', '_').replace(';', '').replace(':', '')
|
inspirehep/plotextractor | plotextractor/output_utils.py | prepare_image_data | python | def prepare_image_data(extracted_image_data, output_directory,
image_mapping):
img_list = {}
for image, caption, label in extracted_image_data:
if not image or image == 'ERROR':
continue
image_location = get_image_location(
image,
output_directory,
image_mapping.keys()
)
if not image_location or not os.path.exists(image_location) or \
len(image_location) < 3:
continue
image_location = os.path.normpath(image_location)
if image_location in img_list:
if caption not in img_list[image_location]['captions']:
img_list[image_location]['captions'].append(caption)
else:
img_list[image_location] = dict(
url=image_location,
original_url=image_mapping[image_location],
captions=[caption],
label=label,
name=get_name_from_path(image_location, output_directory)
)
return img_list.values() | Prepare and clean image-data from duplicates and other garbage.
:param: extracted_image_data ([(string, string, list, list) ...],
...])): the images and their captions + contexts, ordered
:param: tex_file (string): the location of the TeX (used for finding the
associated images; the TeX is assumed to be in the same directory
as the converted images)
:param: image_list ([string, string, ...]): a list of the converted
image file names
:return extracted_image_data ([(string, string, list, list) ...],
...])) again the list of image data cleaned for output | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/output_utils.py#L171-L211 | [
"def get_image_location(image, sdir, image_list, recurred=False):\n \"\"\"Take a raw image name + directory and return the location of image.\n\n :param: image (string): the name of the raw image from the TeX\n :param: sdir (string): the directory where everything was unzipped to\n :param: image_list ([string, string, ...]): the list of images that\n were extracted from the tarball and possibly converted\n\n :return: converted_image (string): the full path to the (possibly\n converted) image file\n \"\"\"\n if isinstance(image, list):\n # image is a list, not good\n return None\n\n image = image.encode('utf-8', 'ignore')\n image = image.strip()\n\n figure_or_file = '(figure=|file=)'\n figure_or_file_in_image = re.findall(figure_or_file, image)\n if len(figure_or_file_in_image) > 0:\n image = image.replace(figure_or_file_in_image[0], '')\n\n includegraphics = r'\\\\includegraphics{(.+)}'\n includegraphics_in_image = re.findall(includegraphics, image)\n if len(includegraphics_in_image) > 0:\n image = includegraphics_in_image[0]\n\n image = image.strip()\n\n some_kind_of_tag = '\\\\\\\\\\\\w+ '\n\n if image.startswith('./'):\n image = image[2:]\n if re.match(some_kind_of_tag, image):\n image = image[len(image.split(' ')[0]) + 1:]\n if image.startswith('='):\n image = image[1:]\n\n if len(image) == 1:\n return None\n\n image = image.strip()\n converted_image_should_be = get_converted_image_name(image)\n\n if image_list is None:\n image_list = os.listdir(sdir)\n\n for png_image in image_list:\n png_image_rel = os.path.relpath(png_image, start=sdir)\n if converted_image_should_be == png_image_rel:\n return png_image\n\n # maybe it's in a subfolder (TeX just understands that)\n for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:\n if os.path.isdir(os.path.join(sdir, prefix)):\n image_list = os.listdir(os.path.join(sdir, prefix))\n for png_image in image_list:\n if converted_image_should_be == png_image:\n return os.path.join(sdir, prefix, png_image)\n\n # 
maybe it is actually just loose.\n for png_image in os.listdir(sdir):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n if os.path.isdir(os.path.join(sdir, png_image)):\n # try that, too! we just do two levels, because that's all that's\n # reasonable..\n sub_dir = os.path.join(sdir, png_image)\n for sub_dir_file in os.listdir(sub_dir):\n if os.path.split(converted_image_should_be)[-1] == sub_dir_file: # noqa\n return os.path.join(sub_dir, converted_image_should_be)\n\n # maybe it's actually up a directory or two: this happens in nested\n # tarballs where the TeX is stored in a different directory from the images\n for png_image in os.listdir(os.path.split(sdir)[0]):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n\n if recurred:\n return None\n\n # agh, this calls for drastic measures\n for piece in image.split(' '):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n for piece in image.split(','):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n for piece in image.split('='):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n return None\n",
"def get_name_from_path(full_path, root_path):\n \"\"\"Create a filename by merging path after root directory.\"\"\"\n relative_image_path = os.path.relpath(full_path, root_path)\n return \"_\".join(relative_image_path.split('.')[:-1]).replace('/', '_')\\\n .replace(';', '').replace(':', '')\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import os
import re
def find_open_and_close_braces(line_index, start, brace, lines):
    """Locate a matched pair of braces starting from a given position.

    Scans ``lines`` beginning at ``lines[line_index][start]`` for the
    first opening brace of the same family as ``brace``, then walks
    forward (possibly across several lines) until the matching closing
    brace is found, tracking nesting depth with a stack.

    :param line_index: index of the line to start searching at.
    :param start: index within that line to start searching at.
    :param brace: one brace of the family to match ('{', '}', '[', ']',
        '(' or ')').
    :param lines: the list of line strings being searched.
    :return: tuple ``(open_index, open_line, close_index, close_line)``.
        For an unrecognised ``brace`` returns ``(-1, -1, -1, -1)``; when
        no opening brace is ever found, ``(0, line_index, 0, line_index)``
        with the original line index; for hanging (unclosed) braces the
        opening position is returned for both ends.
    """
    # Normalise to the (open, close) pair for the requested brace family.
    if brace in ['[', ']']:
        open_brace = '['
        close_brace = ']'
    elif brace in ['{', '}']:
        open_brace = '{'
        close_brace = '}'
    elif brace in ['(', ')']:
        open_brace = '('
        close_brace = ')'
    else:
        # unacceptable brace type!
        return (-1, -1, -1, -1)
    # Stack of currently-unclosed opening braces.
    open_braces = []
    line = lines[line_index]
    ret_open_index = line.find(open_brace, start)
    line_index_cpy = line_index  # remember where the search began
    # sometimes people don't put the braces on the same line
    # as the tag
    while ret_open_index == -1:
        line_index = line_index + 1
        if line_index >= len(lines):
            # failed to find open braces...
            return (0, line_index_cpy, 0, line_index_cpy)
        line = lines[line_index]
        ret_open_index = line.find(open_brace)
    open_braces.append(open_brace)
    ret_open_line = line_index
    # open_index/close_index track the last inspected positions on the
    # current line; both start at the opening brace just found.
    open_index = ret_open_index
    close_index = ret_open_index
    while len(open_braces) > 0:
        if open_index == -1 and close_index == -1:
            # we hit the end of the line! oh, noez!
            line_index = line_index + 1
            if line_index >= len(lines):
                # hanging braces!
                return (ret_open_index, ret_open_line,
                        ret_open_index, ret_open_line)
            line = lines[line_index]
            # to not skip things that are at the beginning of the line
            close_index = line.find(close_brace)
            open_index = line.find(open_brace)
        else:
            # advance past the positions already inspected on this line
            if close_index != -1:
                close_index = line.find(close_brace, close_index + 1)
            if open_index != -1:
                open_index = line.find(open_brace, open_index + 1)
        if close_index != -1:
            open_braces.pop()
            # done once the stack is empty and no earlier opening brace
            # on this line reopens the nesting
            if len(open_braces) == 0 and \
               (open_index > close_index or open_index == -1):
                break
        if open_index != -1:
            open_braces.append(open_brace)
    ret_close_index = close_index
    return (ret_open_index, ret_open_line, ret_close_index, line_index)
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
"""
Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together
"""
# stuff we don't like
label_head = '\\label{'
# reassemble that sucker
if end_line > begin_line:
# our caption spanned multiple lines
caption = lines[begin_line][begin_index:]
for included_line_index in range(begin_line + 1, end_line):
caption = caption + ' ' + lines[included_line_index]
caption = caption + ' ' + lines[end_line][:end_index]
caption = caption.replace('\n', ' ')
caption = caption.replace(' ', ' ')
else:
# it fit on one line
caption = lines[begin_line][begin_index:end_index]
# clean out a label tag, if there is one
label_begin = caption.find(label_head)
if label_begin > -1:
# we know that our caption is only one line, so if there's a label
# tag in it, it will be all on one line. so we make up some args
dummy_start, dummy_start_line, label_end, dummy_end = \
find_open_and_close_braces(0, label_begin, '{', [caption])
caption = caption[:label_begin] + caption[label_end + 1:]
caption = caption.strip()
if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
caption = caption[1:-1]
return caption
def get_image_location(image, sdir, image_list, recurred=False):
    """Take a raw image name + directory and return the location of image.

    The raw TeX reference is cleaned of 'figure='/'file=' markers,
    \\includegraphics wrappers, leading tags and './'/'=' prefixes; the
    expected converted (.png) name is then searched for: first in
    ``image_list``, then in conventional figure subfolders of ``sdir``,
    then loose in ``sdir`` and its immediate subdirectories, and finally
    up to two directory levels above ``sdir``.  As a last resort the
    cleaned name is split on spaces, commas and equals signs and each
    piece is retried once (``recurred`` guards against deeper recursion).

    :param image: the name of the raw image from the TeX source (a list
        is rejected).
    :param sdir: the directory where everything was unzipped to.
    :param image_list: the images extracted from the tarball (possibly
        converted), or None to list ``sdir`` instead.
    :param recurred: internal flag marking the split-and-retry pass.
    :return: the full path to the (possibly converted) image file, or
        None.  NOTE(review): some branches return only the bare expected
        converted name rather than a full path -- confirm callers cope.
    """
    if isinstance(image, list):
        # image is a list, not good
        return None

    # NOTE(review): on Python 3, encode() returns bytes, after which the
    # str-pattern regex calls below raise TypeError -- this function
    # looks Python-2 era; confirm the intended interpreter.
    image = image.encode('utf-8', 'ignore')
    image = image.strip()

    # strip a leading 'figure=' / 'file=' key=value marker
    figure_or_file = '(figure=|file=)'
    figure_or_file_in_image = re.findall(figure_or_file, image)
    if len(figure_or_file_in_image) > 0:
        image = image.replace(figure_or_file_in_image[0], '')

    # unwrap \includegraphics{...} and keep only its argument
    includegraphics = r'\\includegraphics{(.+)}'
    includegraphics_in_image = re.findall(includegraphics, image)
    if len(includegraphics_in_image) > 0:
        image = includegraphics_in_image[0]

    image = image.strip()

    # matches a leading TeX command such as '\epsfig ' (double-escaped)
    some_kind_of_tag = '\\\\\\w+ '

    if image.startswith('./'):
        image = image[2:]
    if re.match(some_kind_of_tag, image):
        image = image[len(image.split(' ')[0]) + 1:]
    if image.startswith('='):
        image = image[1:]

    # a single leftover character cannot be a real file name
    if len(image) == 1:
        return None

    image = image.strip()
    converted_image_should_be = get_converted_image_name(image)

    if image_list is None:
        image_list = os.listdir(sdir)

    # direct hit among the extracted/converted images (compared relative
    # to sdir)
    for png_image in image_list:
        png_image_rel = os.path.relpath(png_image, start=sdir)
        if converted_image_should_be == png_image_rel:
            return png_image

    # maybe it's in a subfolder (TeX just understands that)
    # ('figs' appears twice in this list; harmless duplicate scan)
    for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
        if os.path.isdir(os.path.join(sdir, prefix)):
            image_list = os.listdir(os.path.join(sdir, prefix))
            for png_image in image_list:
                if converted_image_should_be == png_image:
                    return os.path.join(sdir, prefix, png_image)

    # maybe it is actually just loose.
    for png_image in os.listdir(sdir):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
        if os.path.isdir(os.path.join(sdir, png_image)):
            # try that, too! we just do two levels, because that's all that's
            # reasonable..
            sub_dir = os.path.join(sdir, png_image)
            for sub_dir_file in os.listdir(sub_dir):
                if os.path.split(converted_image_should_be)[-1] == sub_dir_file:  # noqa
                    return os.path.join(sub_dir, converted_image_should_be)

    # maybe it's actually up a directory or two: this happens in nested
    # tarballs where the TeX is stored in a different directory from the images
    for png_image in os.listdir(os.path.split(sdir)[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be

    if recurred:
        # already on the retry pass; give up
        return None

    # agh, this calls for drastic measures
    for piece in image.split(' '):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res

    for piece in image.split(','):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res

    for piece in image.split('='):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res

    return None
def get_converted_image_name(image):
    """Return the name the image will have once converted to PNG.

    Replaces the existing extension (everything after the last dot of
    the basename) with ``.png``; a path already ending in ``.png`` is
    returned unchanged, and a path with no extension simply gains one.

    :param image: the full path of the image before conversion.
    :return: the full path of the image after conversion.
    """
    png_extension = '.png'

    if image.endswith(png_extension):
        # already a png -- nothing to rewrite
        return image

    directory, basename = os.path.split(image)

    dot_position = basename.rfind('.')
    if dot_position > -1:
        # drop the old extension before appending the new one
        basename = basename[:dot_position]

    return os.path.join(directory, basename + png_extension)
def get_tex_location(new_tex_name, current_tex_name, recurred=False):
    """Resolve a referenced TeX file name to a real file near the source.

    The raw reference is cleaned (surrounding whitespace, an ``input``
    prefix, a leading TeX tag and a ``./`` prefix are removed), then the
    file is searched for in the current directory and in the referenced
    subfolder anchored at the current directory and up to two directory
    levels above it.  If nothing matches on the first attempt, the
    lookup is retried once with ``.tex`` appended.

    :param new_tex_name: the raw name of the TeX file to find.
    :param current_tex_name: path of the TeX file where the reference
        was found; the search is anchored at its directory.
    :param recurred: internal flag preventing a second ``.tex`` retry.
    :return: the location of the other TeX file on disk, or None when it
        is not found.
    """
    tag_pattern = '\\\\\\w+ '
    current_dir = os.path.split(current_tex_name)[0]

    # Clean the raw reference down to a plain (possibly folder-qualified)
    # file name.
    target = new_tex_name.strip()
    if target.startswith('input'):
        target = target[len('input'):]
    if re.match(tag_pattern, target):
        target = target[len(target.split(' ')[0]) + 1:]
    if target.startswith('./'):
        target = target[2:]
    if len(target) == 0:
        return None
    target = target.strip()

    target_file = os.path.split(target)[-1]
    target_folder = os.path.split(target)[0]
    if target_folder == target_file:
        target_folder = ''

    # The file may simply live next to the current TeX file.
    if target_file in os.listdir(current_dir):
        return os.path.join(current_dir, target_file)

    # Otherwise try the referenced folder relative to the current
    # directory and up to two levels above it, nearest first.
    parent_dir = os.path.split(current_dir)[0]
    grandparent_dir = os.path.split(parent_dir)[0]
    for base in (current_dir, parent_dir, grandparent_dir):
        candidate = os.path.join(base, target_folder)
        if os.path.isdir(candidate) and target_file in os.listdir(candidate):
            return os.path.join(candidate, target_file)

    # Last chance: maybe only the extension was omitted.
    if not recurred:
        return get_tex_location(target + '.tex', current_tex_name,
                                recurred=True)
    return None
def get_name_from_path(full_path, root_path):
    """Build a flat name for ``full_path`` relative to ``root_path``.

    The extension (text after the last dot) is dropped, remaining dots
    and '/' separators become underscores, and ';'/':' are removed.
    """
    relative = os.path.relpath(full_path, root_path)
    stem_parts = relative.split('.')[:-1]
    flat = "_".join(stem_parts).replace('/', '_')
    for forbidden in (';', ':'):
        flat = flat.replace(forbidden, '')
    return flat
|
inspirehep/plotextractor | plotextractor/output_utils.py | get_image_location | python | def get_image_location(image, sdir, image_list, recurred=False):
if isinstance(image, list):
# image is a list, not good
return None
image = image.encode('utf-8', 'ignore')
image = image.strip()
figure_or_file = '(figure=|file=)'
figure_or_file_in_image = re.findall(figure_or_file, image)
if len(figure_or_file_in_image) > 0:
image = image.replace(figure_or_file_in_image[0], '')
includegraphics = r'\\includegraphics{(.+)}'
includegraphics_in_image = re.findall(includegraphics, image)
if len(includegraphics_in_image) > 0:
image = includegraphics_in_image[0]
image = image.strip()
some_kind_of_tag = '\\\\\\w+ '
if image.startswith('./'):
image = image[2:]
if re.match(some_kind_of_tag, image):
image = image[len(image.split(' ')[0]) + 1:]
if image.startswith('='):
image = image[1:]
if len(image) == 1:
return None
image = image.strip()
converted_image_should_be = get_converted_image_name(image)
if image_list is None:
image_list = os.listdir(sdir)
for png_image in image_list:
png_image_rel = os.path.relpath(png_image, start=sdir)
if converted_image_should_be == png_image_rel:
return png_image
# maybe it's in a subfolder (TeX just understands that)
for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
if os.path.isdir(os.path.join(sdir, prefix)):
image_list = os.listdir(os.path.join(sdir, prefix))
for png_image in image_list:
if converted_image_should_be == png_image:
return os.path.join(sdir, prefix, png_image)
# maybe it is actually just loose.
for png_image in os.listdir(sdir):
if os.path.split(converted_image_should_be)[-1] == png_image:
return converted_image_should_be
if os.path.isdir(os.path.join(sdir, png_image)):
# try that, too! we just do two levels, because that's all that's
# reasonable..
sub_dir = os.path.join(sdir, png_image)
for sub_dir_file in os.listdir(sub_dir):
if os.path.split(converted_image_should_be)[-1] == sub_dir_file: # noqa
return os.path.join(sub_dir, converted_image_should_be)
# maybe it's actually up a directory or two: this happens in nested
# tarballs where the TeX is stored in a different directory from the images
for png_image in os.listdir(os.path.split(sdir)[0]):
if os.path.split(converted_image_should_be)[-1] == png_image:
return converted_image_should_be
for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
if os.path.split(converted_image_should_be)[-1] == png_image:
return converted_image_should_be
if recurred:
return None
# agh, this calls for drastic measures
for piece in image.split(' '):
res = get_image_location(piece, sdir, image_list, recurred=True)
if res is not None:
return res
for piece in image.split(','):
res = get_image_location(piece, sdir, image_list, recurred=True)
if res is not None:
return res
for piece in image.split('='):
res = get_image_location(piece, sdir, image_list, recurred=True)
if res is not None:
return res
return None | Take a raw image name + directory and return the location of image.
:param: image (string): the name of the raw image from the TeX
:param: sdir (string): the directory where everything was unzipped to
:param: image_list ([string, string, ...]): the list of images that
were extracted from the tarball and possibly converted
:return: converted_image (string): the full path to the (possibly
converted) image file | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/output_utils.py#L214-L315 | [
"def get_converted_image_name(image):\n \"\"\"Return the name of the image after it has been converted to png format.\n\n Strips off the old extension.\n\n :param: image (string): The fullpath of the image before conversion\n\n :return: converted_image (string): the fullpath of the image after convert\n \"\"\"\n png_extension = '.png'\n\n if image[(0 - len(png_extension)):] == png_extension:\n # it already ends in png! we're golden\n return image\n\n img_dir = os.path.split(image)[0]\n image = os.path.split(image)[-1]\n\n # cut off the old extension\n if len(image.split('.')) > 1:\n old_extension = '.' + image.split('.')[-1]\n converted_image = image[:(0 - len(old_extension))] + png_extension\n else:\n # no extension... damn\n converted_image = image + png_extension\n\n return os.path.join(img_dir, converted_image)\n",
"def get_image_location(image, sdir, image_list, recurred=False):\n \"\"\"Take a raw image name + directory and return the location of image.\n\n :param: image (string): the name of the raw image from the TeX\n :param: sdir (string): the directory where everything was unzipped to\n :param: image_list ([string, string, ...]): the list of images that\n were extracted from the tarball and possibly converted\n\n :return: converted_image (string): the full path to the (possibly\n converted) image file\n \"\"\"\n if isinstance(image, list):\n # image is a list, not good\n return None\n\n image = image.encode('utf-8', 'ignore')\n image = image.strip()\n\n figure_or_file = '(figure=|file=)'\n figure_or_file_in_image = re.findall(figure_or_file, image)\n if len(figure_or_file_in_image) > 0:\n image = image.replace(figure_or_file_in_image[0], '')\n\n includegraphics = r'\\\\includegraphics{(.+)}'\n includegraphics_in_image = re.findall(includegraphics, image)\n if len(includegraphics_in_image) > 0:\n image = includegraphics_in_image[0]\n\n image = image.strip()\n\n some_kind_of_tag = '\\\\\\\\\\\\w+ '\n\n if image.startswith('./'):\n image = image[2:]\n if re.match(some_kind_of_tag, image):\n image = image[len(image.split(' ')[0]) + 1:]\n if image.startswith('='):\n image = image[1:]\n\n if len(image) == 1:\n return None\n\n image = image.strip()\n converted_image_should_be = get_converted_image_name(image)\n\n if image_list is None:\n image_list = os.listdir(sdir)\n\n for png_image in image_list:\n png_image_rel = os.path.relpath(png_image, start=sdir)\n if converted_image_should_be == png_image_rel:\n return png_image\n\n # maybe it's in a subfolder (TeX just understands that)\n for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:\n if os.path.isdir(os.path.join(sdir, prefix)):\n image_list = os.listdir(os.path.join(sdir, prefix))\n for png_image in image_list:\n if converted_image_should_be == png_image:\n return os.path.join(sdir, prefix, png_image)\n\n # 
maybe it is actually just loose.\n for png_image in os.listdir(sdir):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n if os.path.isdir(os.path.join(sdir, png_image)):\n # try that, too! we just do two levels, because that's all that's\n # reasonable..\n sub_dir = os.path.join(sdir, png_image)\n for sub_dir_file in os.listdir(sub_dir):\n if os.path.split(converted_image_should_be)[-1] == sub_dir_file: # noqa\n return os.path.join(sub_dir, converted_image_should_be)\n\n # maybe it's actually up a directory or two: this happens in nested\n # tarballs where the TeX is stored in a different directory from the images\n for png_image in os.listdir(os.path.split(sdir)[0]):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n\n if recurred:\n return None\n\n # agh, this calls for drastic measures\n for piece in image.split(' '):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n for piece in image.split(','):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n for piece in image.split('='):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n return None\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import os
import re
def find_open_and_close_braces(line_index, start, brace, lines):
"""
Take the line where we want to start and the index where we want to start
and find the first instance of matched open and close braces of the same
type as brace in file file.
:param: line (int): the index of the line we want to start searching at
:param: start (int): the index in the line we want to start searching at
:param: brace (string): one of the type of brace we are looking for ({, },
[, or ])
:param lines ([string, string, ...]): the array of lines in the file we
are looking in.
:return: (start, start_line, end, end_line): (int, int, int): the index
of the start and end of whatever braces we are looking for, and the
line number that the end is on (since it may be different than the line
we started on)
"""
if brace in ['[', ']']:
open_brace = '['
close_brace = ']'
elif brace in ['{', '}']:
open_brace = '{'
close_brace = '}'
elif brace in ['(', ')']:
open_brace = '('
close_brace = ')'
else:
# unacceptable brace type!
return (-1, -1, -1, -1)
open_braces = []
line = lines[line_index]
ret_open_index = line.find(open_brace, start)
line_index_cpy = line_index
# sometimes people don't put the braces on the same line
# as the tag
while ret_open_index == -1:
line_index = line_index + 1
if line_index >= len(lines):
# failed to find open braces...
return (0, line_index_cpy, 0, line_index_cpy)
line = lines[line_index]
ret_open_index = line.find(open_brace)
open_braces.append(open_brace)
ret_open_line = line_index
open_index = ret_open_index
close_index = ret_open_index
while len(open_braces) > 0:
if open_index == -1 and close_index == -1:
# we hit the end of the line! oh, noez!
line_index = line_index + 1
if line_index >= len(lines):
# hanging braces!
return (ret_open_index, ret_open_line,
ret_open_index, ret_open_line)
line = lines[line_index]
# to not skip things that are at the beginning of the line
close_index = line.find(close_brace)
open_index = line.find(open_brace)
else:
if close_index != -1:
close_index = line.find(close_brace, close_index + 1)
if open_index != -1:
open_index = line.find(open_brace, open_index + 1)
if close_index != -1:
open_braces.pop()
if len(open_braces) == 0 and \
(open_index > close_index or open_index == -1):
break
if open_index != -1:
open_braces.append(open_brace)
ret_close_index = close_index
return (ret_open_index, ret_open_line, ret_close_index, line_index)
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
"""
Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together
"""
# stuff we don't like
label_head = '\\label{'
# reassemble that sucker
if end_line > begin_line:
# our caption spanned multiple lines
caption = lines[begin_line][begin_index:]
for included_line_index in range(begin_line + 1, end_line):
caption = caption + ' ' + lines[included_line_index]
caption = caption + ' ' + lines[end_line][:end_index]
caption = caption.replace('\n', ' ')
caption = caption.replace(' ', ' ')
else:
# it fit on one line
caption = lines[begin_line][begin_index:end_index]
# clean out a label tag, if there is one
label_begin = caption.find(label_head)
if label_begin > -1:
# we know that our caption is only one line, so if there's a label
# tag in it, it will be all on one line. so we make up some args
dummy_start, dummy_start_line, label_end, dummy_end = \
find_open_and_close_braces(0, label_begin, '{', [caption])
caption = caption[:label_begin] + caption[label_end + 1:]
caption = caption.strip()
if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
caption = caption[1:-1]
return caption
def prepare_image_data(extracted_image_data, output_directory,
                       image_mapping):
    """Prepare and clean image-data from duplicates and other garbage.

    :param extracted_image_data: ``[(image, caption, label), ...]``
        tuples as extracted from the TeX sources.
    :param output_directory: the directory the archive was extracted to;
        used to resolve raw image references to files on disk.
    :param image_mapping: mapping of extracted (converted) image paths to
        their original paths; its keys are the candidate image files.
        NOTE(review): the lookup below happens after os.path.normpath,
        so the keys are presumably already normalised -- confirm.
    :return: the deduplicated image records (dict values), one per image
        file, each with url/original_url/captions/label/name keys.
    """
    img_list = {}
    for image, caption, label in extracted_image_data:
        # skip records with no usable image reference
        if not image or image == 'ERROR':
            continue
        image_location = get_image_location(
            image,
            output_directory,
            image_mapping.keys()
        )
        # drop unresolved/implausible paths (< 3 chars is junk)
        if not image_location or not os.path.exists(image_location) or \
                len(image_location) < 3:
            continue
        image_location = os.path.normpath(image_location)
        if image_location in img_list:
            # same file referenced again: just collect the new caption
            if caption not in img_list[image_location]['captions']:
                img_list[image_location]['captions'].append(caption)
        else:
            img_list[image_location] = dict(
                url=image_location,
                original_url=image_mapping[image_location],
                captions=[caption],
                label=label,
                name=get_name_from_path(image_location, output_directory)
            )
    return img_list.values()
def get_converted_image_name(image):
"""Return the name of the image after it has been converted to png format.
Strips off the old extension.
:param: image (string): The fullpath of the image before conversion
:return: converted_image (string): the fullpath of the image after convert
"""
png_extension = '.png'
if image[(0 - len(png_extension)):] == png_extension:
# it already ends in png! we're golden
return image
img_dir = os.path.split(image)[0]
image = os.path.split(image)[-1]
# cut off the old extension
if len(image.split('.')) > 1:
old_extension = '.' + image.split('.')[-1]
converted_image = image[:(0 - len(old_extension))] + png_extension
else:
# no extension... damn
converted_image = image + png_extension
return os.path.join(img_dir, converted_image)
def get_tex_location(new_tex_name, current_tex_name, recurred=False):
"""
Takes the name of a TeX file and attempts to match it to an actual file
in the tarball.
:param: new_tex_name (string): the name of the TeX file to find
:param: current_tex_name (string): the location of the TeX file where we
found the reference
:return: tex_location (string): the location of the other TeX file on
disk or None if it is not found
"""
tex_location = None
current_dir = os.path.split(current_tex_name)[0]
some_kind_of_tag = '\\\\\\w+ '
new_tex_name = new_tex_name.strip()
if new_tex_name.startswith('input'):
new_tex_name = new_tex_name[len('input'):]
if re.match(some_kind_of_tag, new_tex_name):
new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:]
if new_tex_name.startswith('./'):
new_tex_name = new_tex_name[2:]
if len(new_tex_name) == 0:
return None
new_tex_name = new_tex_name.strip()
new_tex_file = os.path.split(new_tex_name)[-1]
new_tex_folder = os.path.split(new_tex_name)[0]
if new_tex_folder == new_tex_file:
new_tex_folder = ''
# could be in the current directory
for any_file in os.listdir(current_dir):
if any_file == new_tex_file:
return os.path.join(current_dir, new_tex_file)
# could be in a subfolder of the current directory
if os.path.isdir(os.path.join(current_dir, new_tex_folder)):
for any_file in os.listdir(os.path.join(current_dir, new_tex_folder)):
if any_file == new_tex_file:
return os.path.join(os.path.join(current_dir, new_tex_folder),
new_tex_file)
# could be in a subfolder of a higher directory
one_dir_up = os.path.join(os.path.split(current_dir)[0], new_tex_folder)
if os.path.isdir(one_dir_up):
for any_file in os.listdir(one_dir_up):
if any_file == new_tex_file:
return os.path.join(one_dir_up, new_tex_file)
two_dirs_up = os.path.join(os.path.split(os.path.split(current_dir)[0])[0],
new_tex_folder)
if os.path.isdir(two_dirs_up):
for any_file in os.listdir(two_dirs_up):
if any_file == new_tex_file:
return os.path.join(two_dirs_up, new_tex_file)
if tex_location is None and not recurred:
return get_tex_location(new_tex_name + '.tex', current_tex_name,
recurred=True)
return tex_location
def get_name_from_path(full_path, root_path):
"""Create a filename by merging path after root directory."""
relative_image_path = os.path.relpath(full_path, root_path)
return "_".join(relative_image_path.split('.')[:-1]).replace('/', '_')\
.replace(';', '').replace(':', '')
|
inspirehep/plotextractor | plotextractor/output_utils.py | get_converted_image_name | python | def get_converted_image_name(image):
png_extension = '.png'
if image[(0 - len(png_extension)):] == png_extension:
# it already ends in png! we're golden
return image
img_dir = os.path.split(image)[0]
image = os.path.split(image)[-1]
# cut off the old extension
if len(image.split('.')) > 1:
old_extension = '.' + image.split('.')[-1]
converted_image = image[:(0 - len(old_extension))] + png_extension
else:
# no extension... damn
converted_image = image + png_extension
return os.path.join(img_dir, converted_image) | Return the name of the image after it has been converted to png format.
Strips off the old extension.
:param: image (string): The fullpath of the image before conversion
:return: converted_image (string): the fullpath of the image after convert | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/output_utils.py#L318-L344 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import os
import re
def find_open_and_close_braces(line_index, start, brace, lines):
"""
Take the line where we want to start and the index where we want to start
and find the first instance of matched open and close braces of the same
type as brace in file file.
:param: line (int): the index of the line we want to start searching at
:param: start (int): the index in the line we want to start searching at
:param: brace (string): one of the type of brace we are looking for ({, },
[, or ])
:param lines ([string, string, ...]): the array of lines in the file we
are looking in.
:return: (start, start_line, end, end_line): (int, int, int): the index
of the start and end of whatever braces we are looking for, and the
line number that the end is on (since it may be different than the line
we started on)
"""
if brace in ['[', ']']:
open_brace = '['
close_brace = ']'
elif brace in ['{', '}']:
open_brace = '{'
close_brace = '}'
elif brace in ['(', ')']:
open_brace = '('
close_brace = ')'
else:
# unacceptable brace type!
return (-1, -1, -1, -1)
open_braces = []
line = lines[line_index]
ret_open_index = line.find(open_brace, start)
line_index_cpy = line_index
# sometimes people don't put the braces on the same line
# as the tag
while ret_open_index == -1:
line_index = line_index + 1
if line_index >= len(lines):
# failed to find open braces...
return (0, line_index_cpy, 0, line_index_cpy)
line = lines[line_index]
ret_open_index = line.find(open_brace)
open_braces.append(open_brace)
ret_open_line = line_index
open_index = ret_open_index
close_index = ret_open_index
while len(open_braces) > 0:
if open_index == -1 and close_index == -1:
# we hit the end of the line! oh, noez!
line_index = line_index + 1
if line_index >= len(lines):
# hanging braces!
return (ret_open_index, ret_open_line,
ret_open_index, ret_open_line)
line = lines[line_index]
# to not skip things that are at the beginning of the line
close_index = line.find(close_brace)
open_index = line.find(open_brace)
else:
if close_index != -1:
close_index = line.find(close_brace, close_index + 1)
if open_index != -1:
open_index = line.find(open_brace, open_index + 1)
if close_index != -1:
open_braces.pop()
if len(open_braces) == 0 and \
(open_index > close_index or open_index == -1):
break
if open_index != -1:
open_braces.append(open_brace)
ret_close_index = close_index
return (ret_open_index, ret_open_line, ret_close_index, line_index)
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
    """Piece a figure caption back together from the raw TeX lines.

    If the caption spans several lines it is joined onto one line,
    embedded newlines become spaces and doubled spaces are collapsed.
    Any ``\\label{...}`` tag inside the caption is removed and a pair of
    braces wrapping the whole caption is stripped.

    :param: begin_line (int): the index of the line where the caption begins
    :param: begin_index (int): the index within the line where the caption
        begins
    :param: end_line (int): the index of the line where the caption ends
    :param: end_index (int): the index within the line where the caption ends
    :param: lines ([string, string, ...]): the line strings of the text
    :return: caption (string): the caption, formatted and pieced together
    """
    # tag we strip out of the caption below
    label_head = '\\label{'
    # reassemble that sucker
    if end_line > begin_line:
        # our caption spanned multiple lines
        caption = lines[begin_line][begin_index:]
        for included_line_index in range(begin_line + 1, end_line):
            caption = caption + ' ' + lines[included_line_index]
        caption = caption + ' ' + lines[end_line][:end_index]
        caption = caption.replace('\n', ' ')
        # BUG FIX: collapse doubled spaces left over from the join; the
        # previous code replaced a single space with a single space (a no-op)
        caption = caption.replace('  ', ' ')
    else:
        # it fit on one line
        caption = lines[begin_line][begin_index:end_index]
    # clean out a label tag, if there is one
    label_begin = caption.find(label_head)
    if label_begin > -1:
        # we know that our caption is only one line, so if there's a label
        # tag in it, it will be all on one line. so we make up some args
        dummy_start, dummy_start_line, label_end, dummy_end = \
            find_open_and_close_braces(0, label_begin, '{', [caption])
        caption = caption[:label_begin] + caption[label_end + 1:]
    caption = caption.strip()
    if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
        caption = caption[1:-1]
    return caption
def prepare_image_data(extracted_image_data, output_directory,
image_mapping):
"""Prepare and clean image-data from duplicates and other garbage.
:param: extracted_image_data ([(string, string, list, list) ...],
...])): the images and their captions + contexts, ordered
:param: tex_file (string): the location of the TeX (used for finding the
associated images; the TeX is assumed to be in the same directory
as the converted images)
:param: image_list ([string, string, ...]): a list of the converted
image file names
:return extracted_image_data ([(string, string, list, list) ...],
...])) again the list of image data cleaned for output
"""
img_list = {}
for image, caption, label in extracted_image_data:
if not image or image == 'ERROR':
continue
image_location = get_image_location(
image,
output_directory,
image_mapping.keys()
)
if not image_location or not os.path.exists(image_location) or \
len(image_location) < 3:
continue
image_location = os.path.normpath(image_location)
if image_location in img_list:
if caption not in img_list[image_location]['captions']:
img_list[image_location]['captions'].append(caption)
else:
img_list[image_location] = dict(
url=image_location,
original_url=image_mapping[image_location],
captions=[caption],
label=label,
name=get_name_from_path(image_location, output_directory)
)
return img_list.values()
def get_image_location(image, sdir, image_list, recurred=False):
    """Take a raw image name + directory and return the location of image.

    The raw TeX reference is first cleaned of markup (``figure=``/``file=``
    prefixes, ``\\includegraphics{...}`` wrappers, a leading backslash tag,
    ``./`` and ``=``), mapped to its expected ``.png`` name, and then looked
    for in: the supplied image list, common figure subfolders of *sdir*,
    *sdir* itself (and one level of its subfolders), and one and two parent
    directories. As a last resort the name is split on spaces, commas and
    equals signs and each fragment is retried recursively.

    :param: image (string): the name of the raw image from the TeX
    :param: sdir (string): the directory where everything was unzipped to
    :param: image_list ([string, string, ...]): the list of images that
        were extracted from the tarball and possibly converted
    :param: recurred (bool): internal flag marking a recursive retry on a
        name fragment (prevents infinite recursion)
    :return: converted_image (string): the full path to the (possibly
        converted) image file, or None if nothing matched
    """
    if isinstance(image, list):
        # image is a list, not good
        return None
    # NOTE(review): on Python 3 encode() yields bytes, while the str regex
    # patterns below would then raise TypeError -- presumably Python 2-era
    # code; confirm the intended runtime.
    image = image.encode('utf-8', 'ignore')
    image = image.strip()
    # strip "figure=" / "file=" key prefixes from the reference
    figure_or_file = '(figure=|file=)'
    figure_or_file_in_image = re.findall(figure_or_file, image)
    if len(figure_or_file_in_image) > 0:
        image = image.replace(figure_or_file_in_image[0], '')
    # unwrap \includegraphics{...} down to its argument
    includegraphics = r'\\includegraphics{(.+)}'
    includegraphics_in_image = re.findall(includegraphics, image)
    if len(includegraphics_in_image) > 0:
        image = includegraphics_in_image[0]
    image = image.strip()
    # a leading "\word " tag (e.g. "\epsfig ") that should be discarded
    some_kind_of_tag = '\\\\\\w+ '
    if image.startswith('./'):
        image = image[2:]
    if re.match(some_kind_of_tag, image):
        image = image[len(image.split(' ')[0]) + 1:]
    if image.startswith('='):
        image = image[1:]
    if len(image) == 1:
        # a single leftover character cannot be a real filename
        return None
    image = image.strip()
    converted_image_should_be = get_converted_image_name(image)
    if image_list is None:
        image_list = os.listdir(sdir)
    # 1) direct hit in the supplied (or listed) image list
    for png_image in image_list:
        png_image_rel = os.path.relpath(png_image, start=sdir)
        if converted_image_should_be == png_image_rel:
            return png_image
    # maybe it's in a subfolder (TeX just understands that)
    for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
        if os.path.isdir(os.path.join(sdir, prefix)):
            image_list = os.listdir(os.path.join(sdir, prefix))
            for png_image in image_list:
                if converted_image_should_be == png_image:
                    return os.path.join(sdir, prefix, png_image)
    # maybe it is actually just loose.
    for png_image in os.listdir(sdir):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
        if os.path.isdir(os.path.join(sdir, png_image)):
            # try that, too! we just do two levels, because that's all that's
            # reasonable..
            sub_dir = os.path.join(sdir, png_image)
            for sub_dir_file in os.listdir(sub_dir):
                if os.path.split(converted_image_should_be)[-1] == sub_dir_file:  # noqa
                    return os.path.join(sub_dir, converted_image_should_be)
    # maybe it's actually up a directory or two: this happens in nested
    # tarballs where the TeX is stored in a different directory from the images
    for png_image in os.listdir(os.path.split(sdir)[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    if recurred:
        # already retrying a fragment: stop here to avoid endless recursion
        return None
    # agh, this calls for drastic measures
    for piece in image.split(' '):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split(','):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split('='):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    return None
def get_tex_location(new_tex_name, current_tex_name, recurred=False):
    """
    Take the name of a TeX file and attempt to match it to an actual file
    in the tarball.

    The reference is stripped of ``\\input``-style markup, then searched
    for (in order) alongside the referring file, in the referenced
    subfolder under the current directory, and under one and two parent
    directories. If nothing matches, the lookup is retried once with a
    ``.tex`` extension appended (authors often omit it).

    :param: new_tex_name (string): the name of the TeX file to find
    :param: current_tex_name (string): the location of the TeX file where we
        found the reference
    :param: recurred (bool): internal flag; True on the ``.tex``-appended
        retry so we don't recurse forever
    :return: tex_location (string): the location of the other TeX file on
        disk or None if it is not found
    """
    current_dir = os.path.split(current_tex_name)[0]
    some_kind_of_tag = '\\\\\\w+ '
    new_tex_name = new_tex_name.strip()
    # strip the markup wrapping the file name in the TeX source
    if new_tex_name.startswith('input'):
        new_tex_name = new_tex_name[len('input'):]
    if re.match(some_kind_of_tag, new_tex_name):
        new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:]
    if new_tex_name.startswith('./'):
        new_tex_name = new_tex_name[2:]
    if len(new_tex_name) == 0:
        return None
    new_tex_name = new_tex_name.strip()
    new_tex_file = os.path.split(new_tex_name)[-1]
    new_tex_folder = os.path.split(new_tex_name)[0]
    if new_tex_folder == new_tex_file:
        new_tex_folder = ''
    # candidate directories, in search order: alongside the referring file,
    # the referenced subfolder under the current dir, then the same
    # subfolder under one and two parent directories (nested tarballs)
    candidate_dirs = [
        current_dir,
        os.path.join(current_dir, new_tex_folder),
        os.path.join(os.path.split(current_dir)[0], new_tex_folder),
        os.path.join(os.path.split(os.path.split(current_dir)[0])[0],
                     new_tex_folder),
    ]
    for directory in candidate_dirs:
        found = _find_file_in_directory(directory, new_tex_file)
        if found is not None:
            return found
    # maybe the author omitted the extension in the TeX source: retry once
    if not recurred:
        return get_tex_location(new_tex_name + '.tex', current_tex_name,
                                recurred=True)
    return None


def _find_file_in_directory(directory, filename):
    """Return directory/filename if *directory* exists and contains it."""
    if not os.path.isdir(directory):
        return None
    if filename in os.listdir(directory):
        return os.path.join(directory, filename)
    return None
def get_name_from_path(full_path, root_path):
    """Create a filename by merging path after root directory."""
    # path of the file relative to the extraction root
    rel_path = os.path.relpath(full_path, root_path)
    # drop the extension (everything after the last dot)
    name = "_".join(rel_path.split('.')[:-1])
    # flatten directory separators and strip characters unsafe in names
    for old, new in (('/', '_'), (';', ''), (':', '')):
        name = name.replace(old, new)
    return name
|
inspirehep/plotextractor | plotextractor/output_utils.py | get_tex_location | python | def get_tex_location(new_tex_name, current_tex_name, recurred=False):
tex_location = None
current_dir = os.path.split(current_tex_name)[0]
some_kind_of_tag = '\\\\\\w+ '
new_tex_name = new_tex_name.strip()
if new_tex_name.startswith('input'):
new_tex_name = new_tex_name[len('input'):]
if re.match(some_kind_of_tag, new_tex_name):
new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:]
if new_tex_name.startswith('./'):
new_tex_name = new_tex_name[2:]
if len(new_tex_name) == 0:
return None
new_tex_name = new_tex_name.strip()
new_tex_file = os.path.split(new_tex_name)[-1]
new_tex_folder = os.path.split(new_tex_name)[0]
if new_tex_folder == new_tex_file:
new_tex_folder = ''
# could be in the current directory
for any_file in os.listdir(current_dir):
if any_file == new_tex_file:
return os.path.join(current_dir, new_tex_file)
# could be in a subfolder of the current directory
if os.path.isdir(os.path.join(current_dir, new_tex_folder)):
for any_file in os.listdir(os.path.join(current_dir, new_tex_folder)):
if any_file == new_tex_file:
return os.path.join(os.path.join(current_dir, new_tex_folder),
new_tex_file)
# could be in a subfolder of a higher directory
one_dir_up = os.path.join(os.path.split(current_dir)[0], new_tex_folder)
if os.path.isdir(one_dir_up):
for any_file in os.listdir(one_dir_up):
if any_file == new_tex_file:
return os.path.join(one_dir_up, new_tex_file)
two_dirs_up = os.path.join(os.path.split(os.path.split(current_dir)[0])[0],
new_tex_folder)
if os.path.isdir(two_dirs_up):
for any_file in os.listdir(two_dirs_up):
if any_file == new_tex_file:
return os.path.join(two_dirs_up, new_tex_file)
if tex_location is None and not recurred:
return get_tex_location(new_tex_name + '.tex', current_tex_name,
recurred=True)
return tex_location | Takes the name of a TeX file and attempts to match it to an actual file
in the tarball.
:param: new_tex_name (string): the name of the TeX file to find
:param: current_tex_name (string): the location of the TeX file where we
found the reference
:return: tex_location (string): the location of the other TeX file on
disk or None if it is not found | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/output_utils.py#L347-L412 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import os
import re
def find_open_and_close_braces(line_index, start, brace, lines):
    """Find the first matched pair of braces of the given type.

    Starting at position *start* of ``lines[line_index]``, scan forward for
    the first opening brace of the same family as *brace*, then for the
    closing brace that balances it (nested pairs of the same family are
    tracked with a stack), possibly continuing onto later lines.

    :param: line_index (int): the index of the line we want to start
        searching at
    :param: start (int): the index in the line we want to start searching at
    :param: brace (string): one of the type of brace we are looking for
        ({, }, [, ], ( or ))
    :param lines ([string, string, ...]): the array of lines in the file we
        are looking in.
    :return: (open_index, open_line, close_index, close_line): indices of
        the opening and closing braces and the lines they sit on. Returns
        (-1, -1, -1, -1) for an unknown brace type, and degenerate
        zero-width ranges when no opening brace or no balancing close
        is ever found (see the early returns below).
    """
    # normalise either member of a pair to its open/close characters
    if brace in ['[', ']']:
        open_brace = '['
        close_brace = ']'
    elif brace in ['{', '}']:
        open_brace = '{'
        close_brace = '}'
    elif brace in ['(', ')']:
        open_brace = '('
        close_brace = ')'
    else:
        # unacceptable brace type!
        return (-1, -1, -1, -1)
    # stack of currently-unmatched opening braces
    open_braces = []
    line = lines[line_index]
    ret_open_index = line.find(open_brace, start)
    line_index_cpy = line_index
    # sometimes people don't put the braces on the same line
    # as the tag
    while ret_open_index == -1:
        line_index = line_index + 1
        if line_index >= len(lines):
            # failed to find open braces...
            return (0, line_index_cpy, 0, line_index_cpy)
        line = lines[line_index]
        ret_open_index = line.find(open_brace)
    open_braces.append(open_brace)
    ret_open_line = line_index
    open_index = ret_open_index
    close_index = ret_open_index
    # scan forward, pushing on each nested open brace and popping on each
    # close, until the stack empties at the balancing close brace
    while len(open_braces) > 0:
        if open_index == -1 and close_index == -1:
            # we hit the end of the line! oh, noez!
            line_index = line_index + 1
            if line_index >= len(lines):
                # hanging braces!
                return (ret_open_index, ret_open_line,
                        ret_open_index, ret_open_line)
            line = lines[line_index]
            # to not skip things that are at the beginning of the line
            close_index = line.find(close_brace)
            open_index = line.find(open_brace)
        else:
            # advance each cursor past the occurrence it last found
            if close_index != -1:
                close_index = line.find(close_brace, close_index + 1)
            if open_index != -1:
                open_index = line.find(open_brace, open_index + 1)
        if close_index != -1:
            open_braces.pop()
            # done only when the close we just popped really terminates the
            # outermost pair (no earlier open brace remains on this line)
            if len(open_braces) == 0 and \
                    (open_index > close_index or open_index == -1):
                break
        if open_index != -1:
            open_braces.append(open_brace)
    ret_close_index = close_index
    return (ret_open_index, ret_open_line, ret_close_index, line_index)
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
    """Piece a figure caption back together from the raw TeX lines.

    If the caption spans several lines it is joined onto one line,
    embedded newlines become spaces and doubled spaces are collapsed.
    Any ``\\label{...}`` tag inside the caption is removed and a pair of
    braces wrapping the whole caption is stripped.

    :param: begin_line (int): the index of the line where the caption begins
    :param: begin_index (int): the index within the line where the caption
        begins
    :param: end_line (int): the index of the line where the caption ends
    :param: end_index (int): the index within the line where the caption ends
    :param: lines ([string, string, ...]): the line strings of the text
    :return: caption (string): the caption, formatted and pieced together
    """
    # tag we strip out of the caption below
    label_head = '\\label{'
    # reassemble that sucker
    if end_line > begin_line:
        # our caption spanned multiple lines
        caption = lines[begin_line][begin_index:]
        for included_line_index in range(begin_line + 1, end_line):
            caption = caption + ' ' + lines[included_line_index]
        caption = caption + ' ' + lines[end_line][:end_index]
        caption = caption.replace('\n', ' ')
        # BUG FIX: collapse doubled spaces left over from the join; the
        # previous code replaced a single space with a single space (a no-op)
        caption = caption.replace('  ', ' ')
    else:
        # it fit on one line
        caption = lines[begin_line][begin_index:end_index]
    # clean out a label tag, if there is one
    label_begin = caption.find(label_head)
    if label_begin > -1:
        # we know that our caption is only one line, so if there's a label
        # tag in it, it will be all on one line. so we make up some args
        dummy_start, dummy_start_line, label_end, dummy_end = \
            find_open_and_close_braces(0, label_begin, '{', [caption])
        caption = caption[:label_begin] + caption[label_end + 1:]
    caption = caption.strip()
    if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
        caption = caption[1:-1]
    return caption
def prepare_image_data(extracted_image_data, output_directory,
image_mapping):
"""Prepare and clean image-data from duplicates and other garbage.
:param: extracted_image_data ([(string, string, list, list) ...],
...])): the images and their captions + contexts, ordered
:param: tex_file (string): the location of the TeX (used for finding the
associated images; the TeX is assumed to be in the same directory
as the converted images)
:param: image_list ([string, string, ...]): a list of the converted
image file names
:return extracted_image_data ([(string, string, list, list) ...],
...])) again the list of image data cleaned for output
"""
img_list = {}
for image, caption, label in extracted_image_data:
if not image or image == 'ERROR':
continue
image_location = get_image_location(
image,
output_directory,
image_mapping.keys()
)
if not image_location or not os.path.exists(image_location) or \
len(image_location) < 3:
continue
image_location = os.path.normpath(image_location)
if image_location in img_list:
if caption not in img_list[image_location]['captions']:
img_list[image_location]['captions'].append(caption)
else:
img_list[image_location] = dict(
url=image_location,
original_url=image_mapping[image_location],
captions=[caption],
label=label,
name=get_name_from_path(image_location, output_directory)
)
return img_list.values()
def get_image_location(image, sdir, image_list, recurred=False):
    """Take a raw image name + directory and return the location of image.

    The raw TeX reference is first cleaned of markup (``figure=``/``file=``
    prefixes, ``\\includegraphics{...}`` wrappers, a leading backslash tag,
    ``./`` and ``=``), mapped to its expected ``.png`` name, and then looked
    for in: the supplied image list, common figure subfolders of *sdir*,
    *sdir* itself (and one level of its subfolders), and one and two parent
    directories. As a last resort the name is split on spaces, commas and
    equals signs and each fragment is retried recursively.

    :param: image (string): the name of the raw image from the TeX
    :param: sdir (string): the directory where everything was unzipped to
    :param: image_list ([string, string, ...]): the list of images that
        were extracted from the tarball and possibly converted
    :param: recurred (bool): internal flag marking a recursive retry on a
        name fragment (prevents infinite recursion)
    :return: converted_image (string): the full path to the (possibly
        converted) image file, or None if nothing matched
    """
    if isinstance(image, list):
        # image is a list, not good
        return None
    # NOTE(review): on Python 3 encode() yields bytes, while the str regex
    # patterns below would then raise TypeError -- presumably Python 2-era
    # code; confirm the intended runtime.
    image = image.encode('utf-8', 'ignore')
    image = image.strip()
    # strip "figure=" / "file=" key prefixes from the reference
    figure_or_file = '(figure=|file=)'
    figure_or_file_in_image = re.findall(figure_or_file, image)
    if len(figure_or_file_in_image) > 0:
        image = image.replace(figure_or_file_in_image[0], '')
    # unwrap \includegraphics{...} down to its argument
    includegraphics = r'\\includegraphics{(.+)}'
    includegraphics_in_image = re.findall(includegraphics, image)
    if len(includegraphics_in_image) > 0:
        image = includegraphics_in_image[0]
    image = image.strip()
    # a leading "\word " tag (e.g. "\epsfig ") that should be discarded
    some_kind_of_tag = '\\\\\\w+ '
    if image.startswith('./'):
        image = image[2:]
    if re.match(some_kind_of_tag, image):
        image = image[len(image.split(' ')[0]) + 1:]
    if image.startswith('='):
        image = image[1:]
    if len(image) == 1:
        # a single leftover character cannot be a real filename
        return None
    image = image.strip()
    converted_image_should_be = get_converted_image_name(image)
    if image_list is None:
        image_list = os.listdir(sdir)
    # 1) direct hit in the supplied (or listed) image list
    for png_image in image_list:
        png_image_rel = os.path.relpath(png_image, start=sdir)
        if converted_image_should_be == png_image_rel:
            return png_image
    # maybe it's in a subfolder (TeX just understands that)
    for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
        if os.path.isdir(os.path.join(sdir, prefix)):
            image_list = os.listdir(os.path.join(sdir, prefix))
            for png_image in image_list:
                if converted_image_should_be == png_image:
                    return os.path.join(sdir, prefix, png_image)
    # maybe it is actually just loose.
    for png_image in os.listdir(sdir):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
        if os.path.isdir(os.path.join(sdir, png_image)):
            # try that, too! we just do two levels, because that's all that's
            # reasonable..
            sub_dir = os.path.join(sdir, png_image)
            for sub_dir_file in os.listdir(sub_dir):
                if os.path.split(converted_image_should_be)[-1] == sub_dir_file:  # noqa
                    return os.path.join(sub_dir, converted_image_should_be)
    # maybe it's actually up a directory or two: this happens in nested
    # tarballs where the TeX is stored in a different directory from the images
    for png_image in os.listdir(os.path.split(sdir)[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    if recurred:
        # already retrying a fragment: stop here to avoid endless recursion
        return None
    # agh, this calls for drastic measures
    for piece in image.split(' '):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split(','):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split('='):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    return None
def get_converted_image_name(image):
    """Return the name of the image after it has been converted to png format.

    Strips off the old extension.

    :param: image (string): The fullpath of the image before conversion
    :return: converted_image (string): the fullpath of the image after convert
    """
    png_extension = '.png'
    # already a png: nothing to do
    if image.endswith(png_extension):
        return image
    directory, basename = os.path.split(image)
    dot_parts = basename.split('.')
    if len(dot_parts) > 1:
        # swap the final extension for .png
        stem = basename[:-(len(dot_parts[-1]) + 1)]
        new_basename = stem + png_extension
    else:
        # no extension at all: just append .png
        new_basename = basename + png_extension
    return os.path.join(directory, new_basename)
def get_name_from_path(full_path, root_path):
    """Create a filename by merging path after root directory.

    The path of *full_path* relative to *root_path* has its extension
    removed; path separators become underscores and ``;``/``:`` are
    dropped (unsafe in filenames).
    """
    relative_image_path = os.path.relpath(full_path, root_path)
    return "_".join(relative_image_path.split('.')[:-1]).replace('/', '_')\
        .replace(';', '').replace(':', '')
|
inspirehep/plotextractor | plotextractor/output_utils.py | get_name_from_path | python | def get_name_from_path(full_path, root_path):
relative_image_path = os.path.relpath(full_path, root_path)
return "_".join(relative_image_path.split('.')[:-1]).replace('/', '_')\
.replace(';', '').replace(':', '') | Create a filename by merging path after root directory. | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/output_utils.py#L415-L419 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2014, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import os
import re
def find_open_and_close_braces(line_index, start, brace, lines):
    """Find the first matched pair of braces of the given type.

    Starting at position *start* of ``lines[line_index]``, scan forward for
    the first opening brace of the same family as *brace*, then for the
    closing brace that balances it (nested pairs of the same family are
    tracked with a stack), possibly continuing onto later lines.

    :param: line_index (int): the index of the line we want to start
        searching at
    :param: start (int): the index in the line we want to start searching at
    :param: brace (string): one of the type of brace we are looking for
        ({, }, [, ], ( or ))
    :param lines ([string, string, ...]): the array of lines in the file we
        are looking in.
    :return: (open_index, open_line, close_index, close_line): indices of
        the opening and closing braces and the lines they sit on. Returns
        (-1, -1, -1, -1) for an unknown brace type, and degenerate
        zero-width ranges when no opening brace or no balancing close
        is ever found (see the early returns below).
    """
    # normalise either member of a pair to its open/close characters
    if brace in ['[', ']']:
        open_brace = '['
        close_brace = ']'
    elif brace in ['{', '}']:
        open_brace = '{'
        close_brace = '}'
    elif brace in ['(', ')']:
        open_brace = '('
        close_brace = ')'
    else:
        # unacceptable brace type!
        return (-1, -1, -1, -1)
    # stack of currently-unmatched opening braces
    open_braces = []
    line = lines[line_index]
    ret_open_index = line.find(open_brace, start)
    line_index_cpy = line_index
    # sometimes people don't put the braces on the same line
    # as the tag
    while ret_open_index == -1:
        line_index = line_index + 1
        if line_index >= len(lines):
            # failed to find open braces...
            return (0, line_index_cpy, 0, line_index_cpy)
        line = lines[line_index]
        ret_open_index = line.find(open_brace)
    open_braces.append(open_brace)
    ret_open_line = line_index
    open_index = ret_open_index
    close_index = ret_open_index
    # scan forward, pushing on each nested open brace and popping on each
    # close, until the stack empties at the balancing close brace
    while len(open_braces) > 0:
        if open_index == -1 and close_index == -1:
            # we hit the end of the line! oh, noez!
            line_index = line_index + 1
            if line_index >= len(lines):
                # hanging braces!
                return (ret_open_index, ret_open_line,
                        ret_open_index, ret_open_line)
            line = lines[line_index]
            # to not skip things that are at the beginning of the line
            close_index = line.find(close_brace)
            open_index = line.find(open_brace)
        else:
            # advance each cursor past the occurrence it last found
            if close_index != -1:
                close_index = line.find(close_brace, close_index + 1)
            if open_index != -1:
                open_index = line.find(open_brace, open_index + 1)
        if close_index != -1:
            open_braces.pop()
            # done only when the close we just popped really terminates the
            # outermost pair (no earlier open brace remains on this line)
            if len(open_braces) == 0 and \
                    (open_index > close_index or open_index == -1):
                break
        if open_index != -1:
            open_braces.append(open_brace)
    ret_close_index = close_index
    return (ret_open_index, ret_open_line, ret_close_index, line_index)
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
    """Piece a figure caption back together from the raw TeX lines.

    If the caption spans several lines it is joined onto one line,
    embedded newlines become spaces and doubled spaces are collapsed.
    Any ``\\label{...}`` tag inside the caption is removed and a pair of
    braces wrapping the whole caption is stripped.

    :param: begin_line (int): the index of the line where the caption begins
    :param: begin_index (int): the index within the line where the caption
        begins
    :param: end_line (int): the index of the line where the caption ends
    :param: end_index (int): the index within the line where the caption ends
    :param: lines ([string, string, ...]): the line strings of the text
    :return: caption (string): the caption, formatted and pieced together
    """
    # tag we strip out of the caption below
    label_head = '\\label{'
    # reassemble that sucker
    if end_line > begin_line:
        # our caption spanned multiple lines
        caption = lines[begin_line][begin_index:]
        for included_line_index in range(begin_line + 1, end_line):
            caption = caption + ' ' + lines[included_line_index]
        caption = caption + ' ' + lines[end_line][:end_index]
        caption = caption.replace('\n', ' ')
        # BUG FIX: collapse doubled spaces left over from the join; the
        # previous code replaced a single space with a single space (a no-op)
        caption = caption.replace('  ', ' ')
    else:
        # it fit on one line
        caption = lines[begin_line][begin_index:end_index]
    # clean out a label tag, if there is one
    label_begin = caption.find(label_head)
    if label_begin > -1:
        # we know that our caption is only one line, so if there's a label
        # tag in it, it will be all on one line. so we make up some args
        dummy_start, dummy_start_line, label_end, dummy_end = \
            find_open_and_close_braces(0, label_begin, '{', [caption])
        caption = caption[:label_begin] + caption[label_end + 1:]
    caption = caption.strip()
    if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
        caption = caption[1:-1]
    return caption
def prepare_image_data(extracted_image_data, output_directory,
image_mapping):
"""Prepare and clean image-data from duplicates and other garbage.
:param: extracted_image_data ([(string, string, list, list) ...],
...])): the images and their captions + contexts, ordered
:param: tex_file (string): the location of the TeX (used for finding the
associated images; the TeX is assumed to be in the same directory
as the converted images)
:param: image_list ([string, string, ...]): a list of the converted
image file names
:return extracted_image_data ([(string, string, list, list) ...],
...])) again the list of image data cleaned for output
"""
img_list = {}
for image, caption, label in extracted_image_data:
if not image or image == 'ERROR':
continue
image_location = get_image_location(
image,
output_directory,
image_mapping.keys()
)
if not image_location or not os.path.exists(image_location) or \
len(image_location) < 3:
continue
image_location = os.path.normpath(image_location)
if image_location in img_list:
if caption not in img_list[image_location]['captions']:
img_list[image_location]['captions'].append(caption)
else:
img_list[image_location] = dict(
url=image_location,
original_url=image_mapping[image_location],
captions=[caption],
label=label,
name=get_name_from_path(image_location, output_directory)
)
return img_list.values()
def get_image_location(image, sdir, image_list, recurred=False):
    """Take a raw image name + directory and return the location of image.

    The raw TeX reference is first cleaned of markup (``figure=``/``file=``
    prefixes, ``\\includegraphics{...}`` wrappers, a leading backslash tag,
    ``./`` and ``=``), mapped to its expected ``.png`` name, and then looked
    for in: the supplied image list, common figure subfolders of *sdir*,
    *sdir* itself (and one level of its subfolders), and one and two parent
    directories. As a last resort the name is split on spaces, commas and
    equals signs and each fragment is retried recursively.

    :param: image (string): the name of the raw image from the TeX
    :param: sdir (string): the directory where everything was unzipped to
    :param: image_list ([string, string, ...]): the list of images that
        were extracted from the tarball and possibly converted
    :param: recurred (bool): internal flag marking a recursive retry on a
        name fragment (prevents infinite recursion)
    :return: converted_image (string): the full path to the (possibly
        converted) image file, or None if nothing matched
    """
    if isinstance(image, list):
        # image is a list, not good
        return None
    # NOTE(review): on Python 3 encode() yields bytes, while the str regex
    # patterns below would then raise TypeError -- presumably Python 2-era
    # code; confirm the intended runtime.
    image = image.encode('utf-8', 'ignore')
    image = image.strip()
    # strip "figure=" / "file=" key prefixes from the reference
    figure_or_file = '(figure=|file=)'
    figure_or_file_in_image = re.findall(figure_or_file, image)
    if len(figure_or_file_in_image) > 0:
        image = image.replace(figure_or_file_in_image[0], '')
    # unwrap \includegraphics{...} down to its argument
    includegraphics = r'\\includegraphics{(.+)}'
    includegraphics_in_image = re.findall(includegraphics, image)
    if len(includegraphics_in_image) > 0:
        image = includegraphics_in_image[0]
    image = image.strip()
    # a leading "\word " tag (e.g. "\epsfig ") that should be discarded
    some_kind_of_tag = '\\\\\\w+ '
    if image.startswith('./'):
        image = image[2:]
    if re.match(some_kind_of_tag, image):
        image = image[len(image.split(' ')[0]) + 1:]
    if image.startswith('='):
        image = image[1:]
    if len(image) == 1:
        # a single leftover character cannot be a real filename
        return None
    image = image.strip()
    converted_image_should_be = get_converted_image_name(image)
    if image_list is None:
        image_list = os.listdir(sdir)
    # 1) direct hit in the supplied (or listed) image list
    for png_image in image_list:
        png_image_rel = os.path.relpath(png_image, start=sdir)
        if converted_image_should_be == png_image_rel:
            return png_image
    # maybe it's in a subfolder (TeX just understands that)
    for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
        if os.path.isdir(os.path.join(sdir, prefix)):
            image_list = os.listdir(os.path.join(sdir, prefix))
            for png_image in image_list:
                if converted_image_should_be == png_image:
                    return os.path.join(sdir, prefix, png_image)
    # maybe it is actually just loose.
    for png_image in os.listdir(sdir):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
        if os.path.isdir(os.path.join(sdir, png_image)):
            # try that, too! we just do two levels, because that's all that's
            # reasonable..
            sub_dir = os.path.join(sdir, png_image)
            for sub_dir_file in os.listdir(sub_dir):
                if os.path.split(converted_image_should_be)[-1] == sub_dir_file:  # noqa
                    return os.path.join(sub_dir, converted_image_should_be)
    # maybe it's actually up a directory or two: this happens in nested
    # tarballs where the TeX is stored in a different directory from the images
    for png_image in os.listdir(os.path.split(sdir)[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    if recurred:
        # already retrying a fragment: stop here to avoid endless recursion
        return None
    # agh, this calls for drastic measures
    for piece in image.split(' '):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split(','):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split('='):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    return None
def get_converted_image_name(image):
    """Return the path of *image* once converted to PNG format.

    The old extension (if any) is replaced by ``.png``; a path that already
    ends in ``.png`` is returned untouched.

    :param: image (string): The fullpath of the image before conversion
    :return: converted_image (string): the fullpath of the image after convert
    """
    png_suffix = '.png'
    if image.endswith(png_suffix):
        # Nothing to do: the file is already named as a PNG.
        return image
    directory, basename = os.path.split(image)
    # Drop everything from the last dot onwards (the old extension), if any.
    stem = basename.rsplit('.', 1)[0] if '.' in basename else basename
    return os.path.join(directory, stem + png_suffix)
def get_tex_location(new_tex_name, current_tex_name, recurred=False):
    """
    Takes the name of a TeX file and attempts to match it to an actual file
    in the tarball.

    :param: new_tex_name (string): the name of the TeX file to find
    :param: current_tex_name (string): the location of the TeX file where we
        found the reference
    :param: recurred (bool): True on the second, recursive attempt, where an
        explicit '.tex' suffix has been appended to the requested name

    :return: tex_location (string): the location of the other TeX file on
        disk or None if it is not found
    """
    tex_location = None
    current_dir = os.path.split(current_tex_name)[0]
    # Regex value is `\\\w+ `: a literal backslash, a TeX command name and a
    # trailing space -- matches leading tags such as '\input ' or '\include '.
    some_kind_of_tag = '\\\\\\w+ '
    new_tex_name = new_tex_name.strip()
    # Strip a leading 'input' keyword left over from \input parsing upstream.
    if new_tex_name.startswith('input'):
        new_tex_name = new_tex_name[len('input'):]
    # Strip any other leading TeX command tag ('\something ').
    if re.match(some_kind_of_tag, new_tex_name):
        new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:]
    # Normalise an explicit current-directory prefix.
    if new_tex_name.startswith('./'):
        new_tex_name = new_tex_name[2:]
    if len(new_tex_name) == 0:
        return None
    new_tex_name = new_tex_name.strip()
    new_tex_file = os.path.split(new_tex_name)[-1]
    new_tex_folder = os.path.split(new_tex_name)[0]
    # NOTE(review): head == tail only happens for paths like 'a/a'; this looks
    # intended to detect a missing folder component -- confirm the intent.
    if new_tex_folder == new_tex_file:
        new_tex_folder = ''
    # could be in the current directory
    for any_file in os.listdir(current_dir):
        if any_file == new_tex_file:
            return os.path.join(current_dir, new_tex_file)
    # could be in a subfolder of the current directory
    if os.path.isdir(os.path.join(current_dir, new_tex_folder)):
        for any_file in os.listdir(os.path.join(current_dir, new_tex_folder)):
            if any_file == new_tex_file:
                return os.path.join(os.path.join(current_dir, new_tex_folder),
                                    new_tex_file)
    # could be in a subfolder of a higher directory
    one_dir_up = os.path.join(os.path.split(current_dir)[0], new_tex_folder)
    if os.path.isdir(one_dir_up):
        for any_file in os.listdir(one_dir_up):
            if any_file == new_tex_file:
                return os.path.join(one_dir_up, new_tex_file)
    # ... or two levels up (nested tarballs keep TeX apart from includes)
    two_dirs_up = os.path.join(os.path.split(os.path.split(current_dir)[0])[0],
                               new_tex_folder)
    if os.path.isdir(two_dirs_up):
        for any_file in os.listdir(two_dirs_up):
            if any_file == new_tex_file:
                return os.path.join(two_dirs_up, new_tex_file)
    # Nothing found: retry once with an explicit '.tex' extension appended.
    if tex_location is None and not recurred:
        return get_tex_location(new_tex_name + '.tex', current_tex_name,
                                recurred=True)
    return tex_location
|
inspirehep/plotextractor | plotextractor/api.py | process_tarball | python | def process_tarball(tarball, output_directory=None, context=False):
if not output_directory:
# No directory given, so we use the same path as the tarball
output_directory = os.path.abspath("{0}_files".format(tarball))
extracted_files_list = untar(tarball, output_directory)
image_list, tex_files = detect_images_and_tex(extracted_files_list)
if tex_files == [] or tex_files is None:
raise NoTexFilesFound("No TeX files found in {0}".format(tarball))
converted_image_mapping = convert_images(image_list)
return map_images_in_tex(
tex_files,
converted_image_mapping,
output_directory,
context
) | Process one tarball end-to-end.
If output directory is given, the tarball will be extracted there.
Otherwise, it will extract it in a folder next to the tarball file.
The function returns a list of dictionaries:
.. code-block:: python
[{
'url': '/path/to/tarball_files/d15-120f3d.png',
'captions': ['The $\\rho^0$ meson properties: (a) Mass ...'],
'name': 'd15-120f3d',
'label': 'fig:mass'
}, ... ]
:param: tarball (string): the absolute location of the tarball we wish
to process
:param: output_directory (string): path of file processing and extraction
(optional)
:param: context: if True, also try to extract context where images are
referenced in the text. (optional)
:return: images(list): list of dictionaries for each image with captions. | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/api.py#L42-L83 | [
"def convert_images(image_list, image_format=\"png\", timeout=20):\n \"\"\"Convert images from list of images to given format, if needed.\n\n Figure out the types of the images that were extracted from\n the tarball and determine how to convert them into PNG.\n\n :param: image_list ([string, string, ...]): the list of image files\n extracted from the tarball in step 1\n :param: image_format (string): which image format to convert to.\n (PNG by default)\n :param: timeout (int): the timeout value on shell commands.\n\n :return: image_mapping ({new_image: original_image, ...]): The mapping of\n image files when all have been converted to PNG format.\n \"\"\"\n png_output_contains = 'PNG image'\n image_mapping = {}\n for image_file in image_list:\n if os.path.isdir(image_file):\n continue\n\n if not os.path.exists(image_file):\n continue\n\n cmd_out = check_output(['file', image_file], timeout=timeout)\n if cmd_out.find(png_output_contains) > -1:\n # Already PNG\n image_mapping[image_file] = image_file\n else:\n # we're just going to assume that ImageMagick can convert all\n # the image types that we may be faced with\n # for sure it can do EPS->PNG and JPG->PNG and PS->PNG\n # and PSTEX->PNG\n converted_image_file = get_converted_image_name(image_file)\n try:\n convert_image(image_file, converted_image_file, image_format)\n except (MissingDelegateError, ResourceLimitError):\n # Too bad, cannot convert image format.\n continue\n if os.path.exists(converted_image_file):\n image_mapping[converted_image_file] = image_file\n\n return image_mapping\n",
"def untar(original_tarball, output_directory):\n \"\"\"Untar given tarball file into directory.\n\n Here we decide if our file is actually a tarball, then\n we untar it and return a list of extracted files.\n\n :param: tarball (string): the name of the tar file from arXiv\n :param: output_directory (string): the directory to untar in\n\n :return: list of absolute file paths\n \"\"\"\n if not tarfile.is_tarfile(original_tarball):\n raise InvalidTarball\n\n tarball = tarfile.open(original_tarball)\n # set mtimes of members to now\n epochsecs = int(time())\n for member in tarball.getmembers():\n member.mtime = epochsecs\n tarball.extractall(output_directory)\n\n file_list = []\n\n for extracted_file in tarball.getnames():\n if extracted_file == '':\n break\n if extracted_file.startswith('./'):\n extracted_file = extracted_file[2:]\n # ensure we are actually looking at the right file\n extracted_file = os.path.join(output_directory, extracted_file)\n\n # Add to full list of extracted files\n file_list.append(extracted_file)\n\n return file_list\n",
"def detect_images_and_tex(\n file_list,\n allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),\n timeout=20):\n \"\"\"Detect from a list of files which are TeX or images.\n\n :param: file_list (list): list of absolute file paths\n :param: allowed_image_types (list): list of allows image formats\n :param: timeout (int): the timeout value on shell commands.\n\n :return: (image_list, tex_file) (([string, string, ...], string)):\n list of images in the tarball and the name of the TeX file in the\n tarball.\n \"\"\"\n tex_file_extension = 'tex'\n\n image_list = []\n might_be_tex = []\n\n for extracted_file in file_list:\n # Ignore directories and hidden (metadata) files\n if os.path.isdir(extracted_file) \\\n or os.path.basename(extracted_file).startswith('.'):\n continue\n\n magic_str = magic.from_file(extracted_file, mime=True)\n\n if magic_str == \"application/x-tex\":\n might_be_tex.append(extracted_file)\n elif magic_str.startswith('image/') \\\n or magic_str == \"application/postscript\":\n image_list.append(extracted_file)\n\n # If neither, maybe it is TeX or an image anyway, otherwise,\n # we don't care.\n else:\n _, dotted_file_extension = os.path.splitext(extracted_file)\n file_extension = dotted_file_extension[1:]\n\n if file_extension == tex_file_extension:\n might_be_tex.append(extracted_file)\n elif file_extension in allowed_image_types:\n image_list.append(extracted_file)\n\n return image_list, might_be_tex\n",
"def map_images_in_tex(tex_files, image_mapping,\n output_directory, context=False):\n \"\"\"Return caption and context for image references found in TeX sources.\"\"\"\n extracted_image_data = []\n for tex_file in tex_files:\n # Extract images, captions and labels based on tex file and images\n partly_extracted_image_data = extract_captions(\n tex_file,\n output_directory,\n image_mapping.keys()\n )\n if partly_extracted_image_data:\n # Convert to dict, add proper filepaths and do various cleaning\n cleaned_image_data = prepare_image_data(\n partly_extracted_image_data,\n output_directory,\n image_mapping,\n )\n if context:\n # Using prev. extracted info, get contexts for each image found\n extract_context(tex_file, cleaned_image_data)\n\n extracted_image_data.extend(cleaned_image_data)\n\n return extracted_image_data\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""API for plotextractor utility."""
from __future__ import absolute_import, print_function
import os
from .extractor import (
extract_captions,
extract_context,
)
from .converter import convert_images, untar, detect_images_and_tex
from .output_utils import (
prepare_image_data,
)
from .errors import NoTexFilesFound
def map_images_in_tex(tex_files, image_mapping,
                      output_directory, context=False):
    """Return caption and context for image references found in TeX sources."""
    results = []
    for source in tex_files:
        # Pull out figures, captions and labels referenced by this TeX file.
        raw_data = extract_captions(
            source,
            output_directory,
            image_mapping.keys()
        )
        if not raw_data:
            continue
        # Normalise to dicts with proper file paths and cleaned-up fields.
        cleaned = prepare_image_data(
            raw_data,
            output_directory,
            image_mapping,
        )
        if context:
            # Also gather the surrounding text for every referenced image.
            extract_context(source, cleaned)
        results.extend(cleaned)
    return results
|
inspirehep/plotextractor | plotextractor/api.py | map_images_in_tex | python | def map_images_in_tex(tex_files, image_mapping,
output_directory, context=False):
extracted_image_data = []
for tex_file in tex_files:
# Extract images, captions and labels based on tex file and images
partly_extracted_image_data = extract_captions(
tex_file,
output_directory,
image_mapping.keys()
)
if partly_extracted_image_data:
# Convert to dict, add proper filepaths and do various cleaning
cleaned_image_data = prepare_image_data(
partly_extracted_image_data,
output_directory,
image_mapping,
)
if context:
# Using prev. extracted info, get contexts for each image found
extract_context(tex_file, cleaned_image_data)
extracted_image_data.extend(cleaned_image_data)
return extracted_image_data | Return caption and context for image references found in TeX sources. | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/api.py#L86-L110 | [
"def extract_captions(tex_file, sdir, image_list, primary=True):\n \"\"\"Extract captions.\n\n Take the TeX file and the list of images in the tarball (which all,\n presumably, are used in the TeX file) and figure out which captions\n in the text are associated with which images\n :param: lines (list): list of lines of the TeX file\n\n :param: tex_file (string): the name of the TeX file which mentions\n the images\n :param: sdir (string): path to current sub-directory\n :param: image_list (list): list of images in tarball\n :param: primary (bool): is this the primary call to extract_caption?\n\n :return: images_and_captions_and_labels ([(string, string, list),\n (string, string, list), ...]):\n a list of tuples representing the names of images and their\n corresponding figure labels from the TeX file\n \"\"\"\n if os.path.isdir(tex_file) or not os.path.exists(tex_file):\n return []\n\n lines = get_lines_from_file(tex_file)\n\n # possible figure lead-ins\n figure_head = u'\\\\begin{figure' # also matches figure*\n figure_wrap_head = u'\\\\begin{wrapfigure'\n figure_tail = u'\\\\end{figure' # also matches figure*\n figure_wrap_tail = u'\\\\end{wrapfigure'\n picture_head = u'\\\\begin{picture}'\n displaymath_head = u'\\\\begin{displaymath}'\n subfloat_head = u'\\\\subfloat'\n subfig_head = u'\\\\subfigure'\n includegraphics_head = u'\\\\includegraphics'\n epsfig_head = u'\\\\epsfig'\n input_head = u'\\\\input'\n # possible caption lead-ins\n caption_head = u'\\\\caption'\n figcaption_head = u'\\\\figcaption'\n label_head = u'\\\\label'\n rotate = u'rotate='\n angle = u'angle='\n eps_tail = u'.eps'\n ps_tail = u'.ps'\n\n doc_head = u'\\\\begin{document}'\n doc_tail = u'\\\\end{document}'\n\n extracted_image_data = []\n cur_image = ''\n caption = ''\n labels = []\n active_label = \"\"\n\n # cut out shit before the doc head\n if primary:\n for line_index in range(len(lines)):\n if lines[line_index].find(doc_head) < 0:\n lines[line_index] = ''\n else:\n break\n\n # are we 
using commas in filenames here?\n commas_okay = False\n\n for dummy1, dummy2, filenames in \\\n os.walk(os.path.split(os.path.split(tex_file)[0])[0]):\n for filename in filenames:\n if filename.find(',') > -1:\n commas_okay = True\n break\n\n # a comment is a % not preceded by a \\\n comment = re.compile(\"(?<!\\\\\\\\)%\")\n\n for line_index in range(len(lines)):\n # get rid of pesky comments by splitting where the comment is\n # and keeping only the part before the %\n line = comment.split(lines[line_index])[0]\n line = line.strip()\n lines[line_index] = line\n\n in_figure_tag = 0\n\n for line_index in range(len(lines)):\n line = lines[line_index]\n\n if line == '':\n continue\n if line.find(doc_tail) > -1:\n break\n\n \"\"\"\n FIGURE -\n structure of a figure:\n \\begin{figure}\n \\formatting...\n \\includegraphics[someoptions]{FILENAME}\n \\caption{CAPTION} %caption and includegraphics may be switched!\n \\end{figure}\n \"\"\"\n\n index = max([line.find(figure_head), line.find(figure_wrap_head)])\n if index > -1:\n in_figure_tag = 1\n # some punks don't like to put things in the figure tag. so we\n # just want to see if there is anything that is sitting outside\n # of it when we find it\n cur_image, caption, extracted_image_data = put_it_together(\n cur_image, caption,\n active_label, extracted_image_data,\n line_index, lines)\n\n # here, you jerks, just make it so that it's fecking impossible to\n # figure out your damn inclusion types\n\n index = max([line.find(eps_tail), line.find(ps_tail),\n line.find(epsfig_head)])\n if index > -1:\n if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:\n ext = True\n else:\n ext = False\n filenames = intelligently_find_filenames(line, ext=ext,\n commas_okay=commas_okay)\n\n # try to look ahead! 
sometimes there are better matches after\n if line_index < len(lines) - 1:\n filenames.extend(intelligently_find_filenames(\n lines[line_index + 1],\n commas_okay=commas_okay))\n if line_index < len(lines) - 2:\n filenames.extend(intelligently_find_filenames(\n lines[line_index + 2],\n commas_okay=commas_okay))\n\n for filename in filenames:\n filename = filename.encode('utf-8', 'ignore')\n if cur_image == '':\n cur_image = filename\n elif type(cur_image) == list:\n if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:\n cur_image[SUB_CAPTION_OR_IMAGE].append(filename)\n else:\n cur_image[SUB_CAPTION_OR_IMAGE] = [filename]\n else:\n cur_image = ['', [cur_image, filename]]\n\n \"\"\"\n Rotate and angle\n \"\"\"\n index = max(line.find(rotate), line.find(angle))\n if index > -1:\n # which is the image associated to it?\n filenames = intelligently_find_filenames(line,\n commas_okay=commas_okay)\n # try the line after and the line before\n if line_index + 1 < len(lines):\n filenames.extend(intelligently_find_filenames(\n lines[line_index + 1],\n commas_okay=commas_okay))\n if line_index > 1:\n filenames.extend(intelligently_find_filenames(\n lines[line_index - 1],\n commas_okay=commas_okay))\n already_tried = []\n for filename in filenames:\n if filename != 'ERROR' and filename not in already_tried:\n if rotate_image(filename, line, sdir, image_list):\n break\n already_tried.append(filename)\n\n \"\"\"\n INCLUDEGRAPHICS -\n structure of includegraphics:\n \\includegraphics[someoptions]{FILENAME}\n \"\"\"\n index = line.find(includegraphics_head)\n if index > -1:\n open_curly, open_curly_line, close_curly, dummy = \\\n find_open_and_close_braces(line_index, index, '{', lines)\n filename = lines[open_curly_line][open_curly + 1:close_curly]\n if cur_image == '':\n cur_image = filename\n elif type(cur_image) == list:\n if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:\n cur_image[SUB_CAPTION_OR_IMAGE].append(filename)\n else:\n cur_image[SUB_CAPTION_OR_IMAGE] = [filename]\n 
else:\n cur_image = ['', [cur_image, filename]]\n\n \"\"\"\n {\\input{FILENAME}}\n \\caption{CAPTION}\n\n This input is ambiguous, since input is also used for things like\n inclusion of data from other LaTeX files directly.\n \"\"\"\n index = line.find(input_head)\n if index > -1:\n new_tex_names = intelligently_find_filenames(\n line, TeX=True,\n commas_okay=commas_okay)\n for new_tex_name in new_tex_names:\n if new_tex_name != 'ERROR':\n new_tex_file = get_tex_location(new_tex_name, tex_file)\n if new_tex_file and primary: # to kill recursion\n extracted_image_data.extend(extract_captions(\n new_tex_file, sdir,\n image_list,\n primary=False\n ))\n\n \"\"\"PICTURE\"\"\"\n\n index = line.find(picture_head)\n if index > -1:\n # structure of a picture:\n # \\begin{picture}\n # ....not worrying about this now\n # print('found picture tag')\n # FIXME\n pass\n\n \"\"\"DISPLAYMATH\"\"\"\n\n index = line.find(displaymath_head)\n if index > -1:\n # structure of a displaymath:\n # \\begin{displaymath}\n # ....not worrying about this now\n # print('found displaymath tag')\n # FIXME\n pass\n\n \"\"\"\n CAPTIONS -\n structure of a caption:\n \\caption[someoptions]{CAPTION}\n or\n \\caption{CAPTION}\n or\n \\caption{{options}{CAPTION}}\n \"\"\"\n\n index = max([line.find(caption_head), line.find(figcaption_head)])\n if index > -1:\n open_curly, open_curly_line, close_curly, close_curly_line = \\\n find_open_and_close_braces(line_index, index, '{', lines)\n\n cap_begin = open_curly + 1\n\n cur_caption = assemble_caption(\n open_curly_line, cap_begin,\n close_curly_line, close_curly, lines)\n\n if caption == '':\n caption = cur_caption\n elif type(caption) == list:\n if type(caption[SUB_CAPTION_OR_IMAGE]) == list:\n caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)\n else:\n caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]\n elif caption != cur_caption:\n caption = ['', [caption, cur_caption]]\n\n \"\"\"\n SUBFLOATS -\n structure of a subfloat (inside of a figure tag):\n 
\\subfloat[CAPTION]{options{FILENAME}}\n\n also associated with the overall caption of the enclosing figure\n \"\"\"\n\n index = line.find(subfloat_head)\n if index > -1:\n # if we are dealing with subfloats, we need a different\n # sort of structure to keep track of captions and subcaptions\n if not isinstance(cur_image, list):\n cur_image = [cur_image, []]\n if not isinstance(caption, list):\n caption = [caption, []]\n\n open_square, open_square_line, close_square, close_square_line = \\\n find_open_and_close_braces(line_index, index, '[', lines)\n cap_begin = open_square + 1\n\n sub_caption = assemble_caption(\n open_square_line,\n cap_begin, close_square_line, close_square, lines)\n caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)\n\n open_curly, open_curly_line, close_curly, dummy = \\\n find_open_and_close_braces(close_square_line,\n close_square, '{', lines)\n sub_image = lines[open_curly_line][open_curly + 1:close_curly]\n\n cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)\n\n \"\"\"\n SUBFIGURES -\n structure of a subfigure (inside a figure tag):\n \\subfigure[CAPTION]{\n \\includegraphics[options]{FILENAME}}\n\n also associated with the overall caption of the enclosing figure\n \"\"\"\n\n index = line.find(subfig_head)\n if index > -1:\n # like with subfloats, we need a different structure for keepin\n # track of this stuff\n if type(cur_image) != list:\n cur_image = [cur_image, []]\n if type(caption) != list:\n caption = [caption, []]\n\n open_square, open_square_line, close_square, close_square_line = \\\n find_open_and_close_braces(line_index, index, '[', lines)\n cap_begin = open_square + 1\n\n sub_caption = assemble_caption(open_square_line,\n cap_begin, close_square_line,\n close_square, lines)\n caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)\n\n index_cpy = index\n\n # find the graphics tag to get the filename\n # it is okay if we eat lines here\n index = line.find(includegraphics_head)\n while index == -1 and (line_index + 1) < len(lines):\n 
line_index += 1\n line = lines[line_index]\n index = line.find(includegraphics_head)\n if line_index == len(lines):\n # didn't find the image name on line\n line_index = index_cpy\n\n open_curly, open_curly_line, close_curly, dummy = \\\n find_open_and_close_braces(line_index,\n index, '{', lines)\n sub_image = lines[open_curly_line][open_curly + 1:close_curly]\n\n cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)\n\n \"\"\"\n LABELS -\n structure of a label:\n \\label{somelabelnamewhichprobablyincludesacolon}\n\n Labels are used to tag images and will later be used in ref tags\n to reference them. This is interesting because in effect the refs\n to a plot are additional caption for it.\n\n Notes: labels can be used for many more things than just plots.\n We'll have to experiment with how to best associate a label with an\n image.. if it's in the caption, it's easy. If it's in a figure, it's\n still okay... but the images that aren't in figure tags are numerous.\n \"\"\"\n index = line.find(label_head)\n if index > -1 and in_figure_tag:\n open_curly, open_curly_line, close_curly, dummy =\\\n find_open_and_close_braces(line_index,\n index, '{', lines)\n label = lines[open_curly_line][open_curly + 1:close_curly]\n if label not in labels:\n active_label = label\n labels.append(label)\n\n \"\"\"\n FIGURE\n\n important: we put the check for the end of the figure at the end\n of the loop in case some pathological person puts everything in one\n line\n \"\"\"\n index = max([\n line.find(figure_tail),\n line.find(figure_wrap_tail),\n line.find(doc_tail)\n ])\n if index > -1:\n in_figure_tag = 0\n cur_image, caption, extracted_image_data = \\\n put_it_together(cur_image, caption, active_label,\n extracted_image_data,\n line_index, lines)\n \"\"\"\n END DOCUMENT\n\n we shouldn't look at anything after the end document tag is found\n \"\"\"\n\n index = line.find(doc_tail)\n if index > -1:\n break\n\n return extracted_image_data\n",
"def extract_context(tex_file, extracted_image_data):\n \"\"\"Extract context.\n\n Given a .tex file and a label name, this function will extract the text\n before and after for all the references made to this label in the text.\n The number of characters to extract before and after is configurable.\n\n :param tex_file (list): path to .tex file\n :param extracted_image_data ([(string, string, list), ...]):\n a list of tuples of images matched to labels and captions from\n this document.\n\n :return extracted_image_data ([(string, string, list, list),\n (string, string, list, list),...)]: the same list, but now containing\n extracted contexts\n \"\"\"\n if os.path.isdir(tex_file) or not os.path.exists(tex_file):\n return []\n\n lines = \"\".join(get_lines_from_file(tex_file))\n\n # Generate context for each image and its assoc. labels\n for data in extracted_image_data:\n context_list = []\n\n # Generate a list of index tuples for all matches\n indicies = [match.span()\n for match in re.finditer(r\"(\\\\(?:fig|ref)\\{%s\\})\" %\n (re.escape(data['label']),),\n lines)]\n for startindex, endindex in indicies:\n # Retrive all lines before label until beginning of file\n i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT\n if i < 0:\n text_before = lines[:startindex]\n else:\n text_before = lines[i:startindex]\n context_before = get_context(text_before, backwards=True)\n\n # Retrive all lines from label until end of file and get context\n i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT\n text_after = lines[endindex:i]\n context_after = get_context(text_after)\n context_list.append(\n context_before + ' \\\\ref{' + data['label'] + '} ' +\n context_after\n )\n data['contexts'] = context_list\n",
"def prepare_image_data(extracted_image_data, output_directory,\n image_mapping):\n \"\"\"Prepare and clean image-data from duplicates and other garbage.\n\n :param: extracted_image_data ([(string, string, list, list) ...],\n ...])): the images and their captions + contexts, ordered\n :param: tex_file (string): the location of the TeX (used for finding the\n associated images; the TeX is assumed to be in the same directory\n as the converted images)\n :param: image_list ([string, string, ...]): a list of the converted\n image file names\n :return extracted_image_data ([(string, string, list, list) ...],\n ...])) again the list of image data cleaned for output\n \"\"\"\n img_list = {}\n for image, caption, label in extracted_image_data:\n if not image or image == 'ERROR':\n continue\n image_location = get_image_location(\n image,\n output_directory,\n image_mapping.keys()\n )\n\n if not image_location or not os.path.exists(image_location) or \\\n len(image_location) < 3:\n continue\n\n image_location = os.path.normpath(image_location)\n if image_location in img_list:\n if caption not in img_list[image_location]['captions']:\n img_list[image_location]['captions'].append(caption)\n else:\n img_list[image_location] = dict(\n url=image_location,\n original_url=image_mapping[image_location],\n captions=[caption],\n label=label,\n name=get_name_from_path(image_location, output_directory)\n )\n return img_list.values()\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2015 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""API for plotextractor utility."""
from __future__ import absolute_import, print_function
import os
from .extractor import (
extract_captions,
extract_context,
)
from .converter import convert_images, untar, detect_images_and_tex
from .output_utils import (
prepare_image_data,
)
from .errors import NoTexFilesFound
def process_tarball(tarball, output_directory=None, context=False):
    """Process one tarball end-to-end.

    If output directory is given, the tarball will be extracted there.
    Otherwise, it will extract it in a folder next to the tarball file.

    The function returns a list of dictionaries:

    .. code-block:: python

        [{
            'url': '/path/to/tarball_files/d15-120f3d.png',
            'captions': ['The $\\rho^0$ meson properties: (a) Mass ...'],
            'name': 'd15-120f3d',
            'label': 'fig:mass'
        }, ... ]

    :param: tarball (string): the absolute location of the tarball we wish
        to process
    :param: output_directory (string): path of file processing and extraction
        (optional)
    :param: context: if True, also try to extract context where images are
        referenced in the text. (optional)

    :return: images(list): list of dictionaries for each image with captions.
    :raises: NoTexFilesFound: when the tarball holds no TeX sources.
    """
    if not output_directory:
        # No directory given, so we use the same path as the tarball
        output_directory = os.path.abspath("{0}_files".format(tarball))
    extracted_files_list = untar(tarball, output_directory)
    image_list, tex_files = detect_images_and_tex(extracted_files_list)
    # Idiomatic emptiness test: covers both None and an empty list.
    if not tex_files:
        raise NoTexFilesFound("No TeX files found in {0}".format(tarball))
    converted_image_mapping = convert_images(image_list)
    return map_images_in_tex(
        tex_files,
        converted_image_mapping,
        output_directory,
        context
    )
|
inspirehep/plotextractor | plotextractor/converter.py | untar | python | def untar(original_tarball, output_directory):
if not tarfile.is_tarfile(original_tarball):
raise InvalidTarball
tarball = tarfile.open(original_tarball)
# set mtimes of members to now
epochsecs = int(time())
for member in tarball.getmembers():
member.mtime = epochsecs
tarball.extractall(output_directory)
file_list = []
for extracted_file in tarball.getnames():
if extracted_file == '':
break
if extracted_file.startswith('./'):
extracted_file = extracted_file[2:]
# ensure we are actually looking at the right file
extracted_file = os.path.join(output_directory, extracted_file)
# Add to full list of extracted files
file_list.append(extracted_file)
return file_list | Untar given tarball file into directory.
Here we decide if our file is actually a tarball, then
we untar it and return a list of extracted files.
:param: tarball (string): the name of the tar file from arXiv
:param: output_directory (string): the directory to untar in
:return: list of absolute file paths | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/converter.py#L45-L79 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Functions related to conversion and untarring."""
from __future__ import absolute_import, print_function
import os
import tarfile
import re
from time import time
from subprocess32 import check_output, TimeoutExpired
import magic
from wand.exceptions import MissingDelegateError, ResourceLimitError
from wand.image import Image
from .errors import InvalidTarball
from .output_utils import get_converted_image_name, get_image_location
def detect_images_and_tex(
file_list,
allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
timeout=20):
"""Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball.
"""
tex_file_extension = 'tex'
image_list = []
might_be_tex = []
for extracted_file in file_list:
# Ignore directories and hidden (metadata) files
if os.path.isdir(extracted_file) \
or os.path.basename(extracted_file).startswith('.'):
continue
magic_str = magic.from_file(extracted_file, mime=True)
if magic_str == "application/x-tex":
might_be_tex.append(extracted_file)
elif magic_str.startswith('image/') \
or magic_str == "application/postscript":
image_list.append(extracted_file)
# If neither, maybe it is TeX or an image anyway, otherwise,
# we don't care.
else:
_, dotted_file_extension = os.path.splitext(extracted_file)
file_extension = dotted_file_extension[1:]
if file_extension == tex_file_extension:
might_be_tex.append(extracted_file)
elif file_extension in allowed_image_types:
image_list.append(extracted_file)
return image_list, might_be_tex
def convert_images(image_list, image_format="png", timeout=20):
"""Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
:return: image_mapping ({new_image: original_image, ...]): The mapping of
image files when all have been converted to PNG format.
"""
png_output_contains = 'PNG image'
image_mapping = {}
for image_file in image_list:
if os.path.isdir(image_file):
continue
if not os.path.exists(image_file):
continue
cmd_out = check_output(['file', image_file], timeout=timeout)
if cmd_out.find(png_output_contains) > -1:
# Already PNG
image_mapping[image_file] = image_file
else:
# we're just going to assume that ImageMagick can convert all
# the image types that we may be faced with
# for sure it can do EPS->PNG and JPG->PNG and PS->PNG
# and PSTEX->PNG
converted_image_file = get_converted_image_name(image_file)
try:
convert_image(image_file, converted_image_file, image_format)
except (MissingDelegateError, ResourceLimitError):
# Too bad, cannot convert image format.
continue
if os.path.exists(converted_image_file):
image_mapping[converted_image_file] = image_file
return image_mapping
def convert_image(from_file, to_file, image_format):
    """Convert ``from_file`` to ``image_format`` and save it as ``to_file``."""
    with Image(filename=from_file) as source:
        with source.convert(image_format) as result:
            result.save(filename=to_file)
    return to_file
def rotate_image(filename, line, sdir, image_list):
    """Rotate an image file in place as requested by the TeX source.

    Given a filename and a line, figure out what it is that the author
    wanted to do wrt changing the rotation of the image and convert the
    file so that this rotation is reflected in its presentation.

    :param: filename (string): the name of the file as specified in the TeX
    :param: line (string): the line where the rotate command was found

    :output: the image file rotated in accordance with the rotate command
    :return: True if something was rotated
    """
    location = get_image_location(filename, sdir, image_list)
    # Pull an ``angle=N`` or ``rotate=N`` option out of the TeX line.
    matches = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
    if not matches:
        return False
    angle_text = matches[0].split('=')[-1].strip()
    if location is None or location == 'ERROR':
        return False
    if not re.match('-*\\d+', angle_text):
        return False
    if angle_text:
        try:
            angle = int(angle_text)
        except (ValueError, TypeError):
            return False
        if not os.path.exists(location):
            return False
        # Rotate a clone and write it back over the original file.
        with Image(filename=location) as source:
            with source.clone() as turned:
                turned.rotate(angle)
                turned.save(filename=location)
        return True
    return False
|
inspirehep/plotextractor | plotextractor/converter.py | detect_images_and_tex | python | def detect_images_and_tex(
file_list,
allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
timeout=20):
tex_file_extension = 'tex'
image_list = []
might_be_tex = []
for extracted_file in file_list:
# Ignore directories and hidden (metadata) files
if os.path.isdir(extracted_file) \
or os.path.basename(extracted_file).startswith('.'):
continue
magic_str = magic.from_file(extracted_file, mime=True)
if magic_str == "application/x-tex":
might_be_tex.append(extracted_file)
elif magic_str.startswith('image/') \
or magic_str == "application/postscript":
image_list.append(extracted_file)
# If neither, maybe it is TeX or an image anyway, otherwise,
# we don't care.
else:
_, dotted_file_extension = os.path.splitext(extracted_file)
file_extension = dotted_file_extension[1:]
if file_extension == tex_file_extension:
might_be_tex.append(extracted_file)
elif file_extension in allowed_image_types:
image_list.append(extracted_file)
return image_list, might_be_tex | Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball. | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/converter.py#L82-L126 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Functions related to conversion and untarring."""
from __future__ import absolute_import, print_function
import os
import tarfile
import re
from time import time
from subprocess32 import check_output, TimeoutExpired
import magic
from wand.exceptions import MissingDelegateError, ResourceLimitError
from wand.image import Image
from .errors import InvalidTarball
from .output_utils import get_converted_image_name, get_image_location
def untar(original_tarball, output_directory):
    """Untar given tarball file into directory.

    Here we decide if our file is actually a tarball, then
    we untar it and return a list of extracted files.

    :param: tarball (string): the name of the tar file from arXiv
    :param: output_directory (string): the directory to untar in

    :return: list of absolute file paths
    :raises: InvalidTarball if the input file is not a tar archive
    """
    if not tarfile.is_tarfile(original_tarball):
        raise InvalidTarball
    # BUGFIX: close the archive handle even when extraction raises;
    # the original leaked the open file on every call.
    with tarfile.open(original_tarball) as tarball:
        # Set mtimes of members to now so downstream tooling treats the
        # extracted files as fresh.
        epochsecs = int(time())
        for member in tarball.getmembers():
            member.mtime = epochsecs
        # NOTE(review): extractall() trusts member paths; a crafted
        # tarball with '..' components can write outside
        # output_directory (CVE-2007-4559). Consider validating member
        # names before extracting.
        tarball.extractall(output_directory)
        names = tarball.getnames()
    file_list = []
    for extracted_file in names:
        if extracted_file == '':
            # An empty name terminates the listing (original behavior kept).
            break
        if extracted_file.startswith('./'):
            extracted_file = extracted_file[2:]
        # ensure we are actually looking at the right file
        extracted_file = os.path.join(output_directory, extracted_file)
        # Add to full list of extracted files
        file_list.append(extracted_file)
    return file_list
def convert_images(image_list, image_format="png", timeout=20):
"""Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
:return: image_mapping ({new_image: original_image, ...]): The mapping of
image files when all have been converted to PNG format.
"""
png_output_contains = 'PNG image'
image_mapping = {}
for image_file in image_list:
if os.path.isdir(image_file):
continue
if not os.path.exists(image_file):
continue
cmd_out = check_output(['file', image_file], timeout=timeout)
if cmd_out.find(png_output_contains) > -1:
# Already PNG
image_mapping[image_file] = image_file
else:
# we're just going to assume that ImageMagick can convert all
# the image types that we may be faced with
# for sure it can do EPS->PNG and JPG->PNG and PS->PNG
# and PSTEX->PNG
converted_image_file = get_converted_image_name(image_file)
try:
convert_image(image_file, converted_image_file, image_format)
except (MissingDelegateError, ResourceLimitError):
# Too bad, cannot convert image format.
continue
if os.path.exists(converted_image_file):
image_mapping[converted_image_file] = image_file
return image_mapping
def convert_image(from_file, to_file, image_format):
"""Convert an image to given format."""
with Image(filename=from_file) as original:
with original.convert(image_format) as converted:
converted.save(filename=to_file)
return to_file
def rotate_image(filename, line, sdir, image_list):
"""Rotate a image.
Given a filename and a line, figure out what it is that the author
wanted to do wrt changing the rotation of the image and convert the
file so that this rotation is reflected in its presentation.
:param: filename (string): the name of the file as specified in the TeX
:param: line (string): the line where the rotate command was found
:output: the image file rotated in accordance with the rotate command
:return: True if something was rotated
"""
file_loc = get_image_location(filename, sdir, image_list)
degrees = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
if len(degrees) < 1:
return False
degrees = degrees[0].split('=')[-1].strip()
if file_loc is None or file_loc == 'ERROR' or\
not re.match('-*\\d+', degrees):
return False
if degrees:
try:
degrees = int(degrees)
except (ValueError, TypeError):
return False
if not os.path.exists(file_loc):
return False
with Image(filename=file_loc) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=file_loc)
return True
return False
|
inspirehep/plotextractor | plotextractor/converter.py | convert_images | python | def convert_images(image_list, image_format="png", timeout=20):
png_output_contains = 'PNG image'
image_mapping = {}
for image_file in image_list:
if os.path.isdir(image_file):
continue
if not os.path.exists(image_file):
continue
cmd_out = check_output(['file', image_file], timeout=timeout)
if cmd_out.find(png_output_contains) > -1:
# Already PNG
image_mapping[image_file] = image_file
else:
# we're just going to assume that ImageMagick can convert all
# the image types that we may be faced with
# for sure it can do EPS->PNG and JPG->PNG and PS->PNG
# and PSTEX->PNG
converted_image_file = get_converted_image_name(image_file)
try:
convert_image(image_file, converted_image_file, image_format)
except (MissingDelegateError, ResourceLimitError):
# Too bad, cannot convert image format.
continue
if os.path.exists(converted_image_file):
image_mapping[converted_image_file] = image_file
return image_mapping | Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
:return: image_mapping ({new_image: original_image, ...}): The mapping of
image files when all have been converted to PNG format. | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/converter.py#L129-L171 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Functions related to conversion and untarring."""
from __future__ import absolute_import, print_function
import os
import tarfile
import re
from time import time
from subprocess32 import check_output, TimeoutExpired
import magic
from wand.exceptions import MissingDelegateError, ResourceLimitError
from wand.image import Image
from .errors import InvalidTarball
from .output_utils import get_converted_image_name, get_image_location
def untar(original_tarball, output_directory):
"""Untar given tarball file into directory.
Here we decide if our file is actually a tarball, then
we untar it and return a list of extracted files.
:param: tarball (string): the name of the tar file from arXiv
:param: output_directory (string): the directory to untar in
:return: list of absolute file paths
"""
if not tarfile.is_tarfile(original_tarball):
raise InvalidTarball
tarball = tarfile.open(original_tarball)
# set mtimes of members to now
epochsecs = int(time())
for member in tarball.getmembers():
member.mtime = epochsecs
tarball.extractall(output_directory)
file_list = []
for extracted_file in tarball.getnames():
if extracted_file == '':
break
if extracted_file.startswith('./'):
extracted_file = extracted_file[2:]
# ensure we are actually looking at the right file
extracted_file = os.path.join(output_directory, extracted_file)
# Add to full list of extracted files
file_list.append(extracted_file)
return file_list
def detect_images_and_tex(
file_list,
allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
timeout=20):
"""Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball.
"""
tex_file_extension = 'tex'
image_list = []
might_be_tex = []
for extracted_file in file_list:
# Ignore directories and hidden (metadata) files
if os.path.isdir(extracted_file) \
or os.path.basename(extracted_file).startswith('.'):
continue
magic_str = magic.from_file(extracted_file, mime=True)
if magic_str == "application/x-tex":
might_be_tex.append(extracted_file)
elif magic_str.startswith('image/') \
or magic_str == "application/postscript":
image_list.append(extracted_file)
# If neither, maybe it is TeX or an image anyway, otherwise,
# we don't care.
else:
_, dotted_file_extension = os.path.splitext(extracted_file)
file_extension = dotted_file_extension[1:]
if file_extension == tex_file_extension:
might_be_tex.append(extracted_file)
elif file_extension in allowed_image_types:
image_list.append(extracted_file)
return image_list, might_be_tex
def convert_image(from_file, to_file, image_format):
"""Convert an image to given format."""
with Image(filename=from_file) as original:
with original.convert(image_format) as converted:
converted.save(filename=to_file)
return to_file
def rotate_image(filename, line, sdir, image_list):
"""Rotate a image.
Given a filename and a line, figure out what it is that the author
wanted to do wrt changing the rotation of the image and convert the
file so that this rotation is reflected in its presentation.
:param: filename (string): the name of the file as specified in the TeX
:param: line (string): the line where the rotate command was found
:output: the image file rotated in accordance with the rotate command
:return: True if something was rotated
"""
file_loc = get_image_location(filename, sdir, image_list)
degrees = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
if len(degrees) < 1:
return False
degrees = degrees[0].split('=')[-1].strip()
if file_loc is None or file_loc == 'ERROR' or\
not re.match('-*\\d+', degrees):
return False
if degrees:
try:
degrees = int(degrees)
except (ValueError, TypeError):
return False
if not os.path.exists(file_loc):
return False
with Image(filename=file_loc) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=file_loc)
return True
return False
|
inspirehep/plotextractor | plotextractor/converter.py | convert_image | python | def convert_image(from_file, to_file, image_format):
with Image(filename=from_file) as original:
with original.convert(image_format) as converted:
converted.save(filename=to_file)
return to_file | Convert an image to given format. | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/converter.py#L174-L179 | null | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Functions related to conversion and untarring."""
from __future__ import absolute_import, print_function
import os
import tarfile
import re
from time import time
from subprocess32 import check_output, TimeoutExpired
import magic
from wand.exceptions import MissingDelegateError, ResourceLimitError
from wand.image import Image
from .errors import InvalidTarball
from .output_utils import get_converted_image_name, get_image_location
def untar(original_tarball, output_directory):
"""Untar given tarball file into directory.
Here we decide if our file is actually a tarball, then
we untar it and return a list of extracted files.
:param: tarball (string): the name of the tar file from arXiv
:param: output_directory (string): the directory to untar in
:return: list of absolute file paths
"""
if not tarfile.is_tarfile(original_tarball):
raise InvalidTarball
tarball = tarfile.open(original_tarball)
# set mtimes of members to now
epochsecs = int(time())
for member in tarball.getmembers():
member.mtime = epochsecs
tarball.extractall(output_directory)
file_list = []
for extracted_file in tarball.getnames():
if extracted_file == '':
break
if extracted_file.startswith('./'):
extracted_file = extracted_file[2:]
# ensure we are actually looking at the right file
extracted_file = os.path.join(output_directory, extracted_file)
# Add to full list of extracted files
file_list.append(extracted_file)
return file_list
def detect_images_and_tex(
file_list,
allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
timeout=20):
"""Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball.
"""
tex_file_extension = 'tex'
image_list = []
might_be_tex = []
for extracted_file in file_list:
# Ignore directories and hidden (metadata) files
if os.path.isdir(extracted_file) \
or os.path.basename(extracted_file).startswith('.'):
continue
magic_str = magic.from_file(extracted_file, mime=True)
if magic_str == "application/x-tex":
might_be_tex.append(extracted_file)
elif magic_str.startswith('image/') \
or magic_str == "application/postscript":
image_list.append(extracted_file)
# If neither, maybe it is TeX or an image anyway, otherwise,
# we don't care.
else:
_, dotted_file_extension = os.path.splitext(extracted_file)
file_extension = dotted_file_extension[1:]
if file_extension == tex_file_extension:
might_be_tex.append(extracted_file)
elif file_extension in allowed_image_types:
image_list.append(extracted_file)
return image_list, might_be_tex
def convert_images(image_list, image_format="png", timeout=20):
"""Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
:return: image_mapping ({new_image: original_image, ...]): The mapping of
image files when all have been converted to PNG format.
"""
png_output_contains = 'PNG image'
image_mapping = {}
for image_file in image_list:
if os.path.isdir(image_file):
continue
if not os.path.exists(image_file):
continue
cmd_out = check_output(['file', image_file], timeout=timeout)
if cmd_out.find(png_output_contains) > -1:
# Already PNG
image_mapping[image_file] = image_file
else:
# we're just going to assume that ImageMagick can convert all
# the image types that we may be faced with
# for sure it can do EPS->PNG and JPG->PNG and PS->PNG
# and PSTEX->PNG
converted_image_file = get_converted_image_name(image_file)
try:
convert_image(image_file, converted_image_file, image_format)
except (MissingDelegateError, ResourceLimitError):
# Too bad, cannot convert image format.
continue
if os.path.exists(converted_image_file):
image_mapping[converted_image_file] = image_file
return image_mapping
def rotate_image(filename, line, sdir, image_list):
"""Rotate a image.
Given a filename and a line, figure out what it is that the author
wanted to do wrt changing the rotation of the image and convert the
file so that this rotation is reflected in its presentation.
:param: filename (string): the name of the file as specified in the TeX
:param: line (string): the line where the rotate command was found
:output: the image file rotated in accordance with the rotate command
:return: True if something was rotated
"""
file_loc = get_image_location(filename, sdir, image_list)
degrees = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
if len(degrees) < 1:
return False
degrees = degrees[0].split('=')[-1].strip()
if file_loc is None or file_loc == 'ERROR' or\
not re.match('-*\\d+', degrees):
return False
if degrees:
try:
degrees = int(degrees)
except (ValueError, TypeError):
return False
if not os.path.exists(file_loc):
return False
with Image(filename=file_loc) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=file_loc)
return True
return False
|
inspirehep/plotextractor | plotextractor/converter.py | rotate_image | python | def rotate_image(filename, line, sdir, image_list):
file_loc = get_image_location(filename, sdir, image_list)
degrees = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
if len(degrees) < 1:
return False
degrees = degrees[0].split('=')[-1].strip()
if file_loc is None or file_loc == 'ERROR' or\
not re.match('-*\\d+', degrees):
return False
if degrees:
try:
degrees = int(degrees)
except (ValueError, TypeError):
return False
if not os.path.exists(file_loc):
return False
with Image(filename=file_loc) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=file_loc)
return True
return False | Rotate a image.
Given a filename and a line, figure out what it is that the author
wanted to do wrt changing the rotation of the image and convert the
file so that this rotation is reflected in its presentation.
:param: filename (string): the name of the file as specified in the TeX
:param: line (string): the line where the rotate command was found
:output: the image file rotated in accordance with the rotate command
:return: True if something was rotated | train | https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/converter.py#L182-L221 | [
"def get_image_location(image, sdir, image_list, recurred=False):\n \"\"\"Take a raw image name + directory and return the location of image.\n\n :param: image (string): the name of the raw image from the TeX\n :param: sdir (string): the directory where everything was unzipped to\n :param: image_list ([string, string, ...]): the list of images that\n were extracted from the tarball and possibly converted\n\n :return: converted_image (string): the full path to the (possibly\n converted) image file\n \"\"\"\n if isinstance(image, list):\n # image is a list, not good\n return None\n\n image = image.encode('utf-8', 'ignore')\n image = image.strip()\n\n figure_or_file = '(figure=|file=)'\n figure_or_file_in_image = re.findall(figure_or_file, image)\n if len(figure_or_file_in_image) > 0:\n image = image.replace(figure_or_file_in_image[0], '')\n\n includegraphics = r'\\\\includegraphics{(.+)}'\n includegraphics_in_image = re.findall(includegraphics, image)\n if len(includegraphics_in_image) > 0:\n image = includegraphics_in_image[0]\n\n image = image.strip()\n\n some_kind_of_tag = '\\\\\\\\\\\\w+ '\n\n if image.startswith('./'):\n image = image[2:]\n if re.match(some_kind_of_tag, image):\n image = image[len(image.split(' ')[0]) + 1:]\n if image.startswith('='):\n image = image[1:]\n\n if len(image) == 1:\n return None\n\n image = image.strip()\n converted_image_should_be = get_converted_image_name(image)\n\n if image_list is None:\n image_list = os.listdir(sdir)\n\n for png_image in image_list:\n png_image_rel = os.path.relpath(png_image, start=sdir)\n if converted_image_should_be == png_image_rel:\n return png_image\n\n # maybe it's in a subfolder (TeX just understands that)\n for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:\n if os.path.isdir(os.path.join(sdir, prefix)):\n image_list = os.listdir(os.path.join(sdir, prefix))\n for png_image in image_list:\n if converted_image_should_be == png_image:\n return os.path.join(sdir, prefix, png_image)\n\n # 
maybe it is actually just loose.\n for png_image in os.listdir(sdir):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n if os.path.isdir(os.path.join(sdir, png_image)):\n # try that, too! we just do two levels, because that's all that's\n # reasonable..\n sub_dir = os.path.join(sdir, png_image)\n for sub_dir_file in os.listdir(sub_dir):\n if os.path.split(converted_image_should_be)[-1] == sub_dir_file: # noqa\n return os.path.join(sub_dir, converted_image_should_be)\n\n # maybe it's actually up a directory or two: this happens in nested\n # tarballs where the TeX is stored in a different directory from the images\n for png_image in os.listdir(os.path.split(sdir)[0]):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):\n if os.path.split(converted_image_should_be)[-1] == png_image:\n return converted_image_should_be\n\n if recurred:\n return None\n\n # agh, this calls for drastic measures\n for piece in image.split(' '):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n for piece in image.split(','):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n for piece in image.split('='):\n res = get_image_location(piece, sdir, image_list, recurred=True)\n if res is not None:\n return res\n\n return None\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of plotextractor.
# Copyright (C) 2010, 2011, 2015, 2016 CERN.
#
# plotextractor is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# plotextractor is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with plotextractor; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Functions related to conversion and untarring."""
from __future__ import absolute_import, print_function
import os
import tarfile
import re
from time import time
from subprocess32 import check_output, TimeoutExpired
import magic
from wand.exceptions import MissingDelegateError, ResourceLimitError
from wand.image import Image
from .errors import InvalidTarball
from .output_utils import get_converted_image_name, get_image_location
def untar(original_tarball, output_directory):
"""Untar given tarball file into directory.
Here we decide if our file is actually a tarball, then
we untar it and return a list of extracted files.
:param: tarball (string): the name of the tar file from arXiv
:param: output_directory (string): the directory to untar in
:return: list of absolute file paths
"""
if not tarfile.is_tarfile(original_tarball):
raise InvalidTarball
tarball = tarfile.open(original_tarball)
# set mtimes of members to now
epochsecs = int(time())
for member in tarball.getmembers():
member.mtime = epochsecs
tarball.extractall(output_directory)
file_list = []
for extracted_file in tarball.getnames():
if extracted_file == '':
break
if extracted_file.startswith('./'):
extracted_file = extracted_file[2:]
# ensure we are actually looking at the right file
extracted_file = os.path.join(output_directory, extracted_file)
# Add to full list of extracted files
file_list.append(extracted_file)
return file_list
def detect_images_and_tex(
        file_list,
        allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
        timeout=20):
    """Detect from a list of files which are TeX or images.

    :param: file_list (list): list of absolute file paths
    :param: allowed_image_types (list): list of allows image formats
    :param: timeout (int): the timeout value on shell commands.
    :return: (image_list, tex_file) (([string, string, ...], string)):
        list of images in the tarball and the name of the TeX file in the
        tarball.
    """
    images = []
    tex_candidates = []
    for path in file_list:
        # Directories and hidden (metadata) files are of no interest.
        if os.path.isdir(path) or os.path.basename(path).startswith('.'):
            continue
        mime = magic.from_file(path, mime=True)
        if mime == "application/x-tex":
            tex_candidates.append(path)
        elif mime.startswith('image/') \
                or mime == "application/postscript":
            images.append(path)
        else:
            # The MIME probe was inconclusive; fall back to the file
            # extension before giving up on the file entirely.
            extension = os.path.splitext(path)[1][1:]
            if extension == 'tex':
                tex_candidates.append(path)
            elif extension in allowed_image_types:
                images.append(path)
    return images, tex_candidates
def convert_images(image_list, image_format="png", timeout=20):
    """Convert images from list of images to given format, if needed.

    Figure out the types of the images that were extracted from
    the tarball and determine how to convert them into PNG.

    :param: image_list ([string, string, ...]): the list of image files
        extracted from the tarball in step 1
    :param: image_format (string): which image format to convert to.
        (PNG by default)
    :param: timeout (int): the timeout value on shell commands.
    :return: image_mapping ({new_image: original_image, ...]): The mapping of
        image files when all have been converted to PNG format.
    """
    mapping = {}
    for original in image_list:
        # Skip directories and files that have vanished.
        if os.path.isdir(original) or not os.path.exists(original):
            continue
        file_info = check_output(['file', original], timeout=timeout)
        if file_info.find('PNG image') > -1:
            # Already PNG, maps onto itself.
            mapping[original] = original
            continue
        # we're just going to assume that ImageMagick can convert all
        # the image types that we may be faced with
        # for sure it can do EPS->PNG and JPG->PNG and PS->PNG
        # and PSTEX->PNG
        target = get_converted_image_name(original)
        try:
            convert_image(original, target, image_format)
        except (MissingDelegateError, ResourceLimitError):
            # Too bad, cannot convert image format.
            continue
        if os.path.exists(target):
            mapping[target] = original
    return mapping
def convert_image(from_file, to_file, image_format):
    """Convert an image to given format."""
    # Open the source image, let wand transcode it, and persist the
    # result; both wand handles are released by the with-blocks.
    with Image(filename=from_file) as source:
        with source.convert(image_format) as result:
            result.save(filename=to_file)
    return to_file
|
klmitch/framer | framer/transport.py | FramerAdaptor.factory | python | def factory(cls, client, *args, **kwargs):
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs) | Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L52-L88 | null | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def send_frame(self, frame):
"""
Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer.
"""
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data)
def push_framer(self, *args, **kwargs):
"""
Called by the client protocol to temporarily switch to a new
send framer, receive framer, or both. Can be called multiple
times. Each call to ``push_framer()`` must be paired with a
call to ``pop_framer()``, which restores to the previously set
framer.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Append the element to the framer stack
self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
def set_framer(self, *args, **kwargs):
"""
Called by the client protocol to replace the current send
framer, receive framer, or both. This does not alter the
stack maintained by ``push_framer()`` and ``pop_framer()``; if
this method is called after ``push_framer()``, then
``pop_framer()`` is called, the framers in force at the time
``push_framer()`` was called will be restored.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Now, replace the current top of the framer stack
self._framers[-1] = elem
@property
def _send_framer(self):
"""
Retrieve the current send framer.
"""
return self._framers[-1].send
@property
def _send_state(self):
"""
Retrieve the current send framer state.
"""
return self._framers[-1].send_state
@property
def _recv_framer(self):
"""
Retrieve the current receive framer.
"""
return self._framers[-1].recv
@property
def _recv_state(self):
"""
Retrieve the current receive framer state.
"""
return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor._interpret_framer | python | def _interpret_framer(self, args, kwargs):
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state) | Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L131-L210 | null | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def send_frame(self, frame):
"""
Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer.
"""
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data)
def push_framer(self, *args, **kwargs):
"""
Called by the client protocol to temporarily switch to a new
send framer, receive framer, or both. Can be called multiple
times. Each call to ``push_framer()`` must be paired with a
call to ``pop_framer()``, which restores to the previously set
framer.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Append the element to the framer stack
self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
def set_framer(self, *args, **kwargs):
"""
Called by the client protocol to replace the current send
framer, receive framer, or both. This does not alter the
stack maintained by ``push_framer()`` and ``pop_framer()``; if
this method is called after ``push_framer()``, then
``pop_framer()`` is called, the framers in force at the time
``push_framer()`` was called will be restored.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Now, replace the current top of the framer stack
self._framers[-1] = elem
@property
def _send_framer(self):
"""
Retrieve the current send framer.
"""
return self._framers[-1].send
@property
def _send_state(self):
"""
Retrieve the current send framer state.
"""
return self._framers[-1].send_state
@property
def _recv_framer(self):
"""
Retrieve the current receive framer.
"""
return self._framers[-1].recv
@property
def _recv_state(self):
"""
Retrieve the current receive framer state.
"""
return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.connection_made | python | def connection_made(self, transport):
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self) | Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L212-L224 | null | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def send_frame(self, frame):
"""
Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer.
"""
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data)
def push_framer(self, *args, **kwargs):
"""
Called by the client protocol to temporarily switch to a new
send framer, receive framer, or both. Can be called multiple
times. Each call to ``push_framer()`` must be paired with a
call to ``pop_framer()``, which restores to the previously set
framer.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Append the element to the framer stack
self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
def set_framer(self, *args, **kwargs):
"""
Called by the client protocol to replace the current send
framer, receive framer, or both. This does not alter the
stack maintained by ``push_framer()`` and ``pop_framer()``; if
this method is called after ``push_framer()``, then
``pop_framer()`` is called, the framers in force at the time
``push_framer()`` was called will be restored.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Now, replace the current top of the framer stack
self._framers[-1] = elem
@property
def _send_framer(self):
"""
Retrieve the current send framer.
"""
return self._framers[-1].send
@property
def _send_state(self):
"""
Retrieve the current send framer state.
"""
return self._framers[-1].send_state
@property
def _recv_framer(self):
"""
Retrieve the current receive framer.
"""
return self._framers[-1].recv
@property
def _recv_state(self):
"""
Retrieve the current receive framer state.
"""
return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.data_received | python | def data_received(self, data):
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame) | Called by the underlying transport when data is received.
:param data: The data received on the connection. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L257-L278 | null | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def send_frame(self, frame):
"""
Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer.
"""
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data)
    def push_framer(self, *args, **kwargs):
        """
        Called by the client protocol to temporarily switch to a new
        send framer, receive framer, or both.  Can be called multiple
        times; each call must be paired with a later ``pop_framer()``,
        which restores the previously active framers.

        With positional arguments, the first argument is a framer
        object used for both directions; an optional second argument
        is a state object shared by both.  If no state is given, one
        is allocated and initialized via the framer's
        ``initialize_state()``.

        With keyword arguments, ``send`` and ``recv`` name the
        per-direction framers (an omitted direction keeps its current
        framer), and ``send_state``/``recv_state`` supply optional
        states, allocated and initialized if missing.  A state given
        without its corresponding framer is ignored.
        """
        # First, interpret the arguments into a FramerElement
        elem = self._interpret_framer(args, kwargs)
        # Push it; the _send_framer/_recv_framer properties always
        # read the top of this stack
        self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
    def set_framer(self, *args, **kwargs):
        """
        Called by the client protocol to replace the currently active
        send framer, receive framer, or both.  This overwrites the top
        of the framer stack in place, so it does not disturb the
        pairing of ``push_framer()`` and ``pop_framer()`` calls: a
        later ``pop_framer()`` still restores whatever was in force at
        the matching ``push_framer()``.

        Arguments are interpreted exactly as for ``push_framer()``:
        one or two positional arguments (a framer for both directions,
        plus an optional shared state), or ``send``/``recv`` and
        ``send_state``/``recv_state`` keyword arguments replacing
        either direction independently.  Missing states are allocated
        and initialized via the framer's ``initialize_state()``; a
        state given without its framer is ignored.
        """
        # First, interpret the arguments into a FramerElement
        elem = self._interpret_framer(args, kwargs)
        # Replace (not push) the current top of the framer stack
        self._framers[-1] = elem
    @property
    def _send_framer(self):
        """The active send framer (top of the framer stack)."""
        return self._framers[-1].send

    @property
    def _send_state(self):
        """The state object for the active send framer."""
        return self._framers[-1].send_state

    @property
    def _recv_framer(self):
        """The active receive framer (top of the framer stack)."""
        return self._framers[-1].recv

    @property
    def _recv_state(self):
        """The state object for the active receive framer."""
        return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.get_extra_info | python | def get_extra_info(self, name, default=None):
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default) | Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L307-L342 | null | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
    @classmethod
    def factory(cls, client, *args, **kwargs):
        """
        Generates and returns a callable suitable for passing as the
        ``protocol_factory`` parameter of the ``create_connection()``
        or ``create_server()`` loop methods.  This class method
        performs some sanity checks on the arguments, and is preferred
        over a manually constructed ``lambda``.

        The first argument must be a protocol factory for a
        ``FramedProtocol`` object.  Subsequent positional and keyword
        arguments are interpreted as for the ``set_framer()`` method,
        with the proviso that both send and receive framers must be
        set after argument interpretation.

        :returns: A callable that returns an instance of
                  ``FramerAdaptor``.
        """
        # Some basic sanity checks
        if not six.callable(client):
            raise exc.FramerException("Protocol factory is not a factory")
        # Cannot specify both positional and keyword arguments, but
        # must provide one or the other
        if not args and not kwargs:
            raise exc.InvalidFramerSpecification(
                "No framers specified")
        elif args and kwargs:
            raise exc.InvalidFramerSpecification(
                "Cannot mix positional and keyword framer specifications")
        # And a final, basic sanity check on the argument structure:
        # keyword style must name both directions explicitly
        if not args and ('send' not in kwargs or 'recv' not in kwargs):
            raise exc.InvalidFramerSpecification(
                "Both send and receive framers must be specified")
        return lambda: cls(client, *args, **kwargs)

    def __init__(self, client, *args, **kwargs):
        """
        Initialize a ``FramerAdaptor`` instance.

        The first argument must be a protocol factory for a
        ``FramedProtocol`` object.  Subsequent positional and keyword
        arguments are interpreted as for the ``set_framer()`` method,
        with the proviso that both send and receive framers must be
        set after argument interpretation.
        """
        # A basic sanity check
        if not six.callable(client):
            raise exc.FramerException("Protocol factory is not a factory")
        # Seed the framer stack with an empty element so that
        # _interpret_framer() has "current" framers to fall back on
        self._framers = [FramerElement(None, None, None, None)]
        # Interpret the framer arguments
        elem = self._interpret_framer(args, kwargs)
        if not elem.send or not elem.recv:
            raise exc.InvalidFramerSpecification(
                "Both send and receive framers must be specified")
        # Set the framers
        self._framers = [elem]
        # Instantiate and save the client protocol, now that we have
        # framers
        self._client = client()
        # Remember the underlying transport (set by connection_made())
        self._transport = None
        # And initialize the receive buffer and read paused state
        self._recv_buf = bytearray()
        self._recv_paused = False
    def _interpret_framer(self, args, kwargs):
        """
        Interpret positional and keyword framer arguments into a
        ``FramerElement``.

        :param args: A tuple of positional arguments.  Element 0 is a
                     framer object used for both directions; optional
                     element 1 is a state object shared by both.
        :param kwargs: A dictionary of keyword arguments.  ``send``
                       and ``recv`` name per-direction framers;
                       ``send_state`` and ``recv_state`` supply their
                       optional states.

        :returns: An instance of ``FramerElement``, ready to be pushed
                  onto (or stored in) the framer stack.

        :raises exc.InvalidFramerSpecification: If no arguments are
            given, or positional and keyword arguments are mixed.
        """
        # Cannot specify both positional and keyword arguments, but
        # must provide one or the other
        if not args and not kwargs:
            raise exc.InvalidFramerSpecification(
                "No framers specified")
        elif args and kwargs:
            raise exc.InvalidFramerSpecification(
                "Cannot mix positional and keyword framer specifications")
        # Start from the currently active framers so any direction
        # left unspecified carries over unchanged
        send = self._send_framer
        recv = self._recv_framer
        send_state = self._send_state
        recv_state = self._recv_state
        # Now, is it positional style?
        if args:
            send = args[0]
            recv = args[0]
            # Do we have a state?
            if len(args) > 1:
                send_state = args[1]
                recv_state = args[1]
            else:
                # Allocate a single state shared by both directions
                state = framers.FramerState()
                # Initialize it (once; both directions use one framer)
                send.initialize_state(state)
                send_state = state
                recv_state = state
        else:
            # OK, it's keyword style; do we have a send framer?
            if 'send' in kwargs:
                send = kwargs['send']
                # Do we have a send state?
                if 'send_state' in kwargs:
                    send_state = kwargs['send_state']
                else:
                    # Allocate one and initialize it
                    send_state = framers.FramerState()
                    send.initialize_state(send_state)
            # How about a receive framer?
            if 'recv' in kwargs:
                recv = kwargs['recv']
                # Do we have a recv state?
                if 'recv_state' in kwargs:
                    recv_state = kwargs['recv_state']
                else:
                    # Allocate one and initialize it
                    recv_state = framers.FramerState()
                    recv.initialize_state(recv_state)
        # Create and return a FramerElement
        return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
    def pause_writing(self):
        """
        Called by the underlying transport when the buffer goes over
        the high-water mark; relayed directly to the client protocol.
        """
        # Call pause_writing() on the client protocol
        self._client.pause_writing()

    def resume_writing(self):
        """
        Called by the underlying transport when the buffer drains
        below the low-water mark; relayed directly to the client
        protocol.
        """
        # Call resume_writing() on the client protocol
        self._client.resume_writing()
    def data_received(self, data):
        """
        Called by the underlying transport when data is received.
        Buffers the bytes, then delivers every complete frame the
        receive framer can extract to the client protocol's
        ``frame_received()`` method.

        :param data: The data received on the connection.
        """
        # First, add the data to the receive buffer
        self._recv_buf += data
        # Now, pass all frames we can find to the client protocol;
        # stop early if the client pauses reading mid-loop
        while self._recv_buf and not self._recv_paused:
            try:
                # Extract one frame.  NOTE: to_frame() must consume
                # the framed bytes from _recv_buf, or this loop would
                # never terminate.
                frame = self._recv_framer.to_frame(self._recv_buf,
                                                   self._recv_state)
            except exc.NoFrames:
                # There's data in the buffer, but no complete frames
                break
            # Now call the client protocol's frame_received() method
            self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
    def resume_reading(self):
        """
        Called by the client protocol to resume the receiving end.
        The protocol's ``frame_received()`` method will be called once
        again if some data is available for reading.
        """
        # Clear the read pause status
        self._recv_paused = False
        # Call resume_reading() on the transport
        self._transport.resume_reading()
        # If there's data in the receive buffer, re-run frame
        # extraction over it; data_received(b'') appends nothing but
        # drains any frames that were buffered while paused
        if self._recv_buf:
            self.data_received(b'')
    def abort(self):
        """
        Called by the client protocol to close the transport
        immediately, without waiting for pending operations to
        complete.  Buffered data will be lost and no more data will be
        received.  The client protocol's ``connection_lost()`` will
        eventually be called with ``None`` as its argument.
        """
        # Call abort() on the transport
        self._transport.abort()

    def can_write_eof(self):
        """
        Report whether the underlying transport supports half-closed
        operation through the ``write_eof()`` method.

        :returns: ``True`` if ``write_eof()`` is supported, ``False``
                  otherwise.
        """
        # Delegate the query to the transport
        return self._transport.can_write_eof()

    def get_write_buffer_size(self):
        """
        Return the current size of the output buffer used by the
        underlying transport.

        :returns: The current size of the transport's output buffer.
        """
        # Delegate the query to the transport
        return self._transport.get_write_buffer_size()

    def set_write_buffer_limits(self, high=None, low=None):
        """
        Set the high- and low-water marks for write flow control;
        these govern when the client protocol's ``pause_writing()``
        and ``resume_writing()`` methods are called.

        :param high: Optional non-negative high-water limit; must be
                     >= ``low`` when both are specified.
        :param low: Optional non-negative low-water limit; must be
                    <= ``high`` when both are specified.  When only
                    ``high`` is given, defaults to an
                    implementation-specific value.
        """
        # Pass the limits straight through to the transport
        self._transport.set_write_buffer_limits(high=high, low=low)

    def write_eof(self):
        """
        Close the write end of the transport after flushing buffered
        data; data may still be received.  May raise
        ``NotImplementedError`` if the transport (e.g., SSL) doesn't
        support half-closed connections.
        """
        # Delegate to the transport
        self._transport.write_eof()
    def send_frame(self, frame):
        """
        Called by the client protocol to send a frame to the remote
        peer.  This method does not block; it buffers the data and
        arranges for it to be sent out asynchronously.

        :param frame: The frame to send to the peer.  Must be in the
                      format expected by the currently active send
                      framer.
        """
        # Convert the frame to bytes with the active send framer and
        # queue them on the transport
        data = self._send_framer.to_bytes(frame, self._send_state)
        self._transport.write(data)

    def push_framer(self, *args, **kwargs):
        """
        Called by the client protocol to temporarily switch to a new
        send framer, receive framer, or both.  Each call must be
        paired with a later ``pop_framer()``, which restores the
        previously active framers.

        With positional arguments, the first argument is a framer
        used for both directions; an optional second argument is a
        shared state object (allocated and initialized via the
        framer's ``initialize_state()`` if omitted).  With keyword
        arguments, ``send``/``recv`` name per-direction framers (an
        omitted direction keeps its current framer) and
        ``send_state``/``recv_state`` supply optional states.  A state
        given without its corresponding framer is ignored.
        """
        # First, interpret the arguments into a FramerElement
        elem = self._interpret_framer(args, kwargs)
        # Push it; the framer properties always read the stack top
        self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
    def set_framer(self, *args, **kwargs):
        """
        Called by the client protocol to replace the currently active
        send framer, receive framer, or both.  This overwrites the top
        of the framer stack in place, so it does not disturb the
        pairing of ``push_framer()`` and ``pop_framer()``: a later
        ``pop_framer()`` still restores whatever was in force at the
        matching ``push_framer()``.

        Arguments are interpreted exactly as for ``push_framer()``:
        one or two positional arguments (a framer for both directions,
        plus an optional shared state), or ``send``/``recv`` and
        ``send_state``/``recv_state`` keyword arguments replacing
        either direction independently.  Missing states are allocated
        and initialized via the framer's ``initialize_state()``; a
        state given without its framer is ignored.
        """
        # First, interpret the arguments into a FramerElement
        elem = self._interpret_framer(args, kwargs)
        # Replace (not push) the current top of the framer stack
        self._framers[-1] = elem
    @property
    def _send_framer(self):
        """The active send framer (top of the framer stack)."""
        return self._framers[-1].send

    @property
    def _send_state(self):
        """The state object for the active send framer."""
        return self._framers[-1].send_state

    @property
    def _recv_framer(self):
        """The active receive framer (top of the framer stack)."""
        return self._framers[-1].recv

    @property
    def _recv_state(self):
        """The state object for the active receive framer."""
        return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.resume_reading | python | def resume_reading(self):
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'') | Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L358-L374 | [
"def data_received(self, data):\n \"\"\"\n Called by the underlying transport when data is received.\n\n :param data: The data received on the connection.\n \"\"\"\n\n # First, add the data to the receive buffer\n self._recv_buf += data\n\n # Now, pass all frames we can find to the client protocol\n while self._recv_buf and not self._recv_paused:\n try:\n # Extract one frame\n frame = self._recv_framer.to_frame(self._recv_buf,\n self._recv_state)\n except exc.NoFrames:\n # There's data in the buffer, but no complete frames\n break\n\n # Now call the client protocol's frame_received() method\n self._client.frame_received(frame)\n"
] | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
    """Half-close the write side after flushing buffered data.

    Data may still be received afterwards.  Transports that cannot
    half-close (e.g. SSL) may raise ``NotImplementedError``.
    """
    # Forward the half-close request to the underlying transport.
    self._transport.write_eof()
def send_frame(self, frame):
    """Queue one frame for delivery to the remote peer.

    Does not block: the frame is serialized by the currently active
    send framer and handed to the transport, which flushes it
    asynchronously.

    :param frame: The frame to transmit, in whatever form the
                  currently active send framer expects.
    """
    # Serialize with the active send framer, then hand the raw bytes
    # to the transport for asynchronous delivery.
    payload = self._send_framer.to_bytes(frame, self._send_state)
    self._transport.write(payload)
def push_framer(self, *args, **kwargs):
    """Temporarily switch framers; undo with ``pop_framer()``.

    Called by the client protocol to install a new send framer,
    receive framer, or both.  Calls may be nested; every
    ``push_framer()`` must be balanced by a later ``pop_framer()``,
    which restores the previously active framers.

    Positional form: the first argument is a framer used for both
    directions, and an optional second argument is its state.  When
    no state is given, a fresh one is allocated and initialized via
    the framer's initialization method.

    Keyword form: ``send`` and ``recv`` name the per-direction
    framers; a direction not mentioned keeps its current framer.
    ``send_state`` and ``recv_state`` optionally supply states, which
    are otherwise freshly allocated and initialized.  A state given
    without its matching framer is ignored.
    """
    # Turn the arguments into a stack element and push it, making the
    # new framers current.
    self._framers.append(self._interpret_framer(args, kwargs))
def pop_framer(self):
    """Restore the framers in force before the matching ``push_framer()``.

    Called by the client protocol to undo one ``push_framer()`` call.

    :raises IndexError: If there is no pushed framer left to pop; the
        base framer set installed at construction can never be
        removed.
    """
    # The bottom element holds the framers given to the constructor;
    # popping it would leave the adaptor with no framers at all, so
    # refuse.  (The stack is never truly empty, hence the explicit
    # message rather than 'pop from empty stack'.)
    if len(self._framers) <= 1:
        raise IndexError('cannot pop the base framer set')
    # Discard the top element; the previous framers become current.
    self._framers.pop()
def set_framer(self, *args, **kwargs):
    """Replace the currently active framer(s) in place.

    Unlike ``push_framer()``, this does not grow the framer stack: it
    overwrites the top element, so a later ``pop_framer()`` still
    restores whatever was active at the matching ``push_framer()``
    call.

    Positional form: the first argument is a framer used for both
    directions, with an optional second argument as its state; a
    missing state is freshly allocated and initialized via the
    framer's initialization method.

    Keyword form: ``send`` and ``recv`` name the per-direction
    framers (an omitted direction keeps its current framer), and
    ``send_state``/``recv_state`` optionally supply states, freshly
    allocated and initialized when absent.  A state given without its
    matching framer is ignored.
    """
    # Build the replacement element, then overwrite the stack top so
    # the stack depth is unchanged.
    self._framers[-1] = self._interpret_framer(args, kwargs)
@property
def _send_framer(self):
    """The send framer from the top of the framer stack."""
    top = self._framers[-1]
    return top.send

@property
def _send_state(self):
    """The send framer state from the top of the framer stack."""
    top = self._framers[-1]
    return top.send_state

@property
def _recv_framer(self):
    """The receive framer from the top of the framer stack."""
    top = self._framers[-1]
    return top.recv

@property
def _recv_state(self):
    """The receive framer state from the top of the framer stack."""
    top = self._framers[-1]
    return top.recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.set_write_buffer_limits | python | def set_write_buffer_limits(self, high=None, low=None):
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low) | Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L413-L433 | null | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
with the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def send_frame(self, frame):
"""
Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer.
"""
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data)
def push_framer(self, *args, **kwargs):
"""
Called by the client protocol to temporarily switch to a new
send framer, receive framer, or both. Can be called multiple
times. Each call to ``push_framer()`` must be paired with a
call to ``pop_framer()``, which restores to the previously set
framer.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Append the element to the framer stack
self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
def set_framer(self, *args, **kwargs):
"""
Called by the client protocol to replace the current send
framer, receive framer, or both. This does not alter the
stack maintained by ``push_framer()`` and ``pop_framer()``; if
this method is called after ``push_framer()``, then
``pop_framer()`` is called, the framers in force at the time
``push_framer()`` was called will be restored.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Now, replace the current top of the framer stack
self._framers[-1] = elem
@property
def _send_framer(self):
"""
Retrieve the current send framer.
"""
return self._framers[-1].send
@property
def _send_state(self):
"""
Retrieve the current send framer state.
"""
return self._framers[-1].send_state
@property
def _recv_framer(self):
"""
Retrieve the current receive framer.
"""
return self._framers[-1].recv
@property
def _recv_state(self):
"""
Retrieve the current receive framer state.
"""
return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.send_frame | python | def send_frame(self, frame):
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data) | Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L447-L460 | null | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
with the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
with the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def push_framer(self, *args, **kwargs):
"""
Called by the client protocol to temporarily switch to a new
send framer, receive framer, or both. Can be called multiple
times. Each call to ``push_framer()`` must be paired with a
call to ``pop_framer()``, which restores to the previously set
framer.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Append the element to the framer stack
self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
def set_framer(self, *args, **kwargs):
"""
Called by the client protocol to replace the current send
framer, receive framer, or both. This does not alter the
stack maintained by ``push_framer()`` and ``pop_framer()``; if
this method is called after ``push_framer()``, then
``pop_framer()`` is called, the framers in force at the time
``push_framer()`` was called will be restored.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Now, replace the current top of the framer stack
self._framers[-1] = elem
@property
def _send_framer(self):
"""
Retrieve the current send framer.
"""
return self._framers[-1].send
@property
def _send_state(self):
"""
Retrieve the current send framer state.
"""
return self._framers[-1].send_state
@property
def _recv_framer(self):
"""
Retrieve the current receive framer.
"""
return self._framers[-1].recv
@property
def _recv_state(self):
"""
Retrieve the current receive framer state.
"""
return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.push_framer | python | def push_framer(self, *args, **kwargs):
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Append the element to the framer stack
self._framers.append(elem) | Called by the client protocol to temporarily switch to a new
send framer, receive framer, or both. Can be called multiple
times. Each call to ``push_framer()`` must be paired with a
call to ``pop_framer()``, which restores to the previously set
framer.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L462-L492 | [
"def _interpret_framer(self, args, kwargs):\n \"\"\"\n Interprets positional and keyword arguments related to\n framers.\n\n :param args: A tuple of positional arguments. The first such\n argument will be interpreted as a framer object,\n and the second will be interpreted as a framer\n state.\n :param kwargs: A dictionary of keyword arguments. The\n ``send`` and ``recv`` keyword arguments are\n interpreted as send and receive framers,\n respectively, and the ``send_state`` and\n ``recv_state`` keyword arguments are\n interpreted as states for those framers.\n\n :returns: An instance of ``FramerElement``, which may be\n pushed onto the framer stack.\n \"\"\"\n\n # Cannot specify both positional and keyword arguments, but\n # must provide one or the other\n if not args and not kwargs:\n raise exc.InvalidFramerSpecification(\n \"No framers specified\")\n elif args and kwargs:\n raise exc.InvalidFramerSpecification(\n \"Cannot mix positional and keyword framer specifications\")\n\n # Start with the current send and receive framers\n send = self._send_framer\n recv = self._recv_framer\n send_state = self._send_state\n recv_state = self._recv_state\n\n # Now, is it positional style?\n if args:\n send = args[0]\n recv = args[0]\n\n # Do we have a state?\n if len(args) > 1:\n send_state = args[1]\n recv_state = args[1]\n else:\n # Allocate one\n state = framers.FramerState()\n\n # Initialize it\n send.initialize_state(state)\n\n send_state = state\n recv_state = state\n else:\n # OK, it's keyword style; do we have a send framer?\n if 'send' in kwargs:\n send = kwargs['send']\n\n # Do we have a send state?\n if 'send_state' in kwargs:\n send_state = kwargs['send_state']\n else:\n # Allocate one and initialize it\n send_state = framers.FramerState()\n send.initialize_state(send_state)\n\n # How about a receive framer?\n if 'recv' in kwargs:\n recv = kwargs['recv']\n\n # Do we have a recv state?\n if 'recv_state' in kwargs:\n recv_state = kwargs['recv_state']\n else:\n # 
Allocate one and initialize it\n recv_state = framers.FramerState()\n recv.initialize_state(recv_state)\n\n # Create and return a FramerElement\n return FramerElement(send, recv, send_state, recv_state)\n"
] | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def send_frame(self, frame):
"""
Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer.
"""
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
def set_framer(self, *args, **kwargs):
"""
Called by the client protocol to replace the current send
framer, receive framer, or both. This does not alter the
stack maintained by ``push_framer()`` and ``pop_framer()``; if
this method is called after ``push_framer()``, then
``pop_framer()`` is called, the framers in force at the time
``push_framer()`` was called will be restored.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Now, replace the current top of the framer stack
self._framers[-1] = elem
@property
def _send_framer(self):
"""
Retrieve the current send framer.
"""
return self._framers[-1].send
@property
def _send_state(self):
"""
Retrieve the current send framer state.
"""
return self._framers[-1].send_state
@property
def _recv_framer(self):
"""
Retrieve the current receive framer.
"""
return self._framers[-1].recv
@property
def _recv_state(self):
"""
Retrieve the current receive framer state.
"""
return self._framers[-1].recv_state
|
klmitch/framer | framer/transport.py | FramerAdaptor.set_framer | python | def set_framer(self, *args, **kwargs):
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Now, replace the current top of the framer stack
self._framers[-1] = elem | Called by the client protocol to replace the current send
framer, receive framer, or both. This does not alter the
stack maintained by ``push_framer()`` and ``pop_framer()``; if
this method is called after ``push_framer()``, then
``pop_framer()`` is called, the framers in force at the time
``push_framer()`` was called will be restored.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L509-L540 | [
"def _interpret_framer(self, args, kwargs):\n \"\"\"\n Interprets positional and keyword arguments related to\n framers.\n\n :param args: A tuple of positional arguments. The first such\n argument will be interpreted as a framer object,\n and the second will be interpreted as a framer\n state.\n :param kwargs: A dictionary of keyword arguments. The\n ``send`` and ``recv`` keyword arguments are\n interpreted as send and receive framers,\n respectively, and the ``send_state`` and\n ``recv_state`` keyword arguments are\n interpreted as states for those framers.\n\n :returns: An instance of ``FramerElement``, which may be\n pushed onto the framer stack.\n \"\"\"\n\n # Cannot specify both positional and keyword arguments, but\n # must provide one or the other\n if not args and not kwargs:\n raise exc.InvalidFramerSpecification(\n \"No framers specified\")\n elif args and kwargs:\n raise exc.InvalidFramerSpecification(\n \"Cannot mix positional and keyword framer specifications\")\n\n # Start with the current send and receive framers\n send = self._send_framer\n recv = self._recv_framer\n send_state = self._send_state\n recv_state = self._recv_state\n\n # Now, is it positional style?\n if args:\n send = args[0]\n recv = args[0]\n\n # Do we have a state?\n if len(args) > 1:\n send_state = args[1]\n recv_state = args[1]\n else:\n # Allocate one\n state = framers.FramerState()\n\n # Initialize it\n send.initialize_state(state)\n\n send_state = state\n recv_state = state\n else:\n # OK, it's keyword style; do we have a send framer?\n if 'send' in kwargs:\n send = kwargs['send']\n\n # Do we have a send state?\n if 'send_state' in kwargs:\n send_state = kwargs['send_state']\n else:\n # Allocate one and initialize it\n send_state = framers.FramerState()\n send.initialize_state(send_state)\n\n # How about a receive framer?\n if 'recv' in kwargs:\n recv = kwargs['recv']\n\n # Do we have a recv state?\n if 'recv_state' in kwargs:\n recv_state = kwargs['recv_state']\n else:\n # 
Allocate one and initialize it\n recv_state = framers.FramerState()\n recv.initialize_state(recv_state)\n\n # Create and return a FramerElement\n return FramerElement(send, recv, send_state, recv_state)\n"
] | class FramerAdaptor(object):
"""
The Framer transport adaptor class. Instances of this
class--initialized with an appropriate ``FramedProtocol``
subclass, as well as send and receive framers--should be returned
by the factory passed to the ``create_connection()`` or
``create_server()`` loop methods.
"""
# Handlers for obtaining extra data from this transport using
# get_extra_info()
_handlers = {
'send_framer': lambda p: p._send_framer,
'send_state': lambda p: p._send_state,
'recv_framer': lambda p: p._recv_framer,
'recv_state': lambda p: p._recv_state,
'recv_buf': lambda p: six.binary_type(p._recv_buf),
'recv_paused': lambda p: p._recv_paused,
'client_protocol': lambda p: p._client,
'transport': lambda p: p._transport,
}
@classmethod
def factory(cls, client, *args, **kwargs):
"""
Generates and returns a callable suitable for passing as the
``protocol_factory`` parameter of the ``create_connection()``
or ``create_server()`` loop methods. This class method
performs some sanity checks on the arguments, and is preferred
over using a manually constructed ``lambda``.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# Some basic sanity checks
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# And a final, basic sanity check on the argument structure
if not args and ('send' not in kwargs or 'recv' not in kwargs):
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
return lambda: cls(client, *args, **kwargs)
def __init__(self, client, *args, **kwargs):
"""
Initialize a ``FramerProtocol`` instance.
The first argument must be a protocol factory for a
``FramedProtocol`` object. Subsequent positional and keyword
arguments are interpreted as for the ``set_framer()`` method,
which the proviso that both send and receive framers must be
set after argument interpretation.
:returns: A callable that returns an instance of
``FramerProtocol``.
"""
# A basic sanity check
if not six.callable(client):
raise exc.FramerException("Protocol factory is not a factory")
# Initialize the framer stack for _interpret_framer()
self._framers = [FramerElement(None, None, None, None)]
# Interpret the framer arguments
elem = self._interpret_framer(args, kwargs)
if not elem.send or not elem.recv:
raise exc.InvalidFramerSpecification(
"Both send and receive framers must be specified")
# Set the framers
self._framers = [elem]
# Instantiate and save the client protocol, now that we have
# framers
self._client = client()
# Remember the underlying transport
self._transport = None
# And initialize the receive buffer and read paused state
self._recv_buf = bytearray()
self._recv_paused = False
def _interpret_framer(self, args, kwargs):
"""
Interprets positional and keyword arguments related to
framers.
:param args: A tuple of positional arguments. The first such
argument will be interpreted as a framer object,
and the second will be interpreted as a framer
state.
:param kwargs: A dictionary of keyword arguments. The
``send`` and ``recv`` keyword arguments are
interpreted as send and receive framers,
respectively, and the ``send_state`` and
``recv_state`` keyword arguments are
interpreted as states for those framers.
:returns: An instance of ``FramerElement``, which may be
pushed onto the framer stack.
"""
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
raise exc.InvalidFramerSpecification(
"No framers specified")
elif args and kwargs:
raise exc.InvalidFramerSpecification(
"Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
send = args[0]
recv = args[0]
# Do we have a state?
if len(args) > 1:
send_state = args[1]
recv_state = args[1]
else:
# Allocate one
state = framers.FramerState()
# Initialize it
send.initialize_state(state)
send_state = state
recv_state = state
else:
# OK, it's keyword style; do we have a send framer?
if 'send' in kwargs:
send = kwargs['send']
# Do we have a send state?
if 'send_state' in kwargs:
send_state = kwargs['send_state']
else:
# Allocate one and initialize it
send_state = framers.FramerState()
send.initialize_state(send_state)
# How about a receive framer?
if 'recv' in kwargs:
recv = kwargs['recv']
# Do we have a recv state?
if 'recv_state' in kwargs:
recv_state = kwargs['recv_state']
else:
# Allocate one and initialize it
recv_state = framers.FramerState()
recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
def connection_made(self, transport):
"""
Called by the underlying transport when a connection is made.
:param transport: The transport representing the connection.
"""
# Save the underlying transport
self._transport = transport
# Call connection_made() on the client protocol, passing
# ourself as the transport
self._client.connection_made(self)
def connection_lost(self, exc):
"""
Called by the underlying transport when a connection is lost.
:param exc: Either an exception object or ``None``. If the
latter, indicates an EOF was received, or that the
connection was aborted or closed by this side of
the connection.
"""
# Call connection_lost() on the client protocol
self._client.connection_lost(exc)
def pause_writing(self):
"""
Called by the underlying transport when the buffer goes over
the high-water mark.
"""
# Call pause_writing() on the client protocol
self._client.pause_writing()
def resume_writing(self):
"""
Called by the underlying transport when the buffer drains
below the low-water mark.
"""
# Call resume_writing() on the client protocol
self._client.resume_writing()
def data_received(self, data):
"""
Called by the underlying transport when data is received.
:param data: The data received on the connection.
"""
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
while self._recv_buf and not self._recv_paused:
try:
# Extract one frame
frame = self._recv_framer.to_frame(self._recv_buf,
self._recv_state)
except exc.NoFrames:
# There's data in the buffer, but no complete frames
break
# Now call the client protocol's frame_received() method
self._client.frame_received(frame)
def eof_received(self):
"""
Called by the underlying transport when the other end signals
it won't send any more data.
:returns: A ``False`` value (including ``None``, the default
return value) to cause the transport to close
itself, and ``True`` to leave the connection
half-open.
"""
# Call eof_received() on the client protocol
return self._client.eof_received()
def close(self):
"""
Called by the client protocol to close the connection. If the
transport has a buffer for outgoing data, buffered data will
be flushed asynchronously. No more data will be received.
After all buffered data is flushed, the protocol's
``connection_lost()`` method will be called with ``None`` as
its argument.
"""
# Call close() on the transport
self._transport.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def pause_reading(self):
"""
Called by the client protocol to pause the receiving end of
the transport. No data will be passed to the protocol's
``frame_received()`` method until ``resume_reading()`` is
called.
"""
# Remember that reading is paused
self._recv_paused = True
# Call pause_reading() on the transport
self._transport.pause_reading()
def resume_reading(self):
"""
Called by the client protocol to resume the receiving end.
The protocol's ``frame_received()`` method will be called once
again if some data is available for reading.
"""
# Clear the read pause status
self._recv_paused = False
# Call resume_reading() on the transport
self._transport.resume_reading()
# If there's data in the receive buffer, pass it on to the
# client protocol
if self._recv_buf:
self.data_received(b'')
def abort(self):
"""
Called by the client protocol to close the transport
immediately, without waiting for pending operations to
complete. Buffered data will be lost. No more data will be
received. The protocol's ``connection_lost()`` method will
eventually be called with ``None`` as its argument.
"""
# Call abort() on the transport
self._transport.abort()
def can_write_eof(self):
"""
Called by the client protocol to determine if the transport
supports half-closed operations through the ``write_eof()``
method.
:returns: A ``True`` value if ``write_eof()`` is supported,
``False`` otherwise.
"""
# Call can_write_eof() on the transport
return self._transport.can_write_eof()
def get_write_buffer_size(self):
"""
Called by the client protocol to return the current size of
the output buffer used by the transport.
:returns: The current size of the output buffer used by the
transport.
"""
# Call get_write_buffer_size() on the transport
return self._transport.get_write_buffer_size()
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low)
def write_eof(self):
"""
Called by the client protocol to close the write end of the
transport after flushing buffered data. Data may still be
received. This method may raise ``NotImplementedError`` if
the transport (e.g., SSL) doesn't support half-closed
connections.
"""
# Call write_eof() on the transport
self._transport.write_eof()
def send_frame(self, frame):
"""
Called by the client protocol to send a frame to the remote
peer. This method does not block; it buffers the data and
arranges for it to be sent out asynchronously.
:param frame: The frame to send to the peer. Must be in the
format expected by the currently active send
framer.
"""
# Convert the frame to bytes and write them to the connection
data = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(data)
def push_framer(self, *args, **kwargs):
"""
Called by the client protocol to temporarily switch to a new
send framer, receive framer, or both. Can be called multiple
times. Each call to ``push_framer()`` must be paired with a
call to ``pop_framer()``, which restores to the previously set
framer.
When called with positional arguments, the first argument
specifies a framer object to replace both send and receive
framers. A second argument may be used to specify a state
object for the framers; if none is specified, a new one will
be allocated and initialized by calling the appropriate framer
initialization method.
When called with keyword arguments, the ``send`` and ``recv``
arguments specify the send and receive framer object,
respectively. If either is not provided, the existing framer
for that direction will be maintained. The ``send_state`` and
``recv_state`` arguments specify optional state objects for
the respective framers, and will be allocated and initialized
by calling the appropriate framer initialization method, if
not provided. If a state argument is given without a
corresponding replacement framer, it will be ignored.
"""
# First, interpret the arguments
elem = self._interpret_framer(args, kwargs)
# Append the element to the framer stack
self._framers.append(elem)
def pop_framer(self):
"""
Called by the client protocol to revert to the set of framers
in use prior to the corresponding ``push_framer()`` call.
Raises an ``IndexError`` if the framer stack cannot be popped.
"""
# If the framer stack has only one element, raise an
# IndexError
if len(self._framers) <= 1:
raise IndexError('pop from empty stack')
# Pop an element off
self._framers.pop()
@property
def _send_framer(self):
"""
Retrieve the current send framer.
"""
return self._framers[-1].send
@property
def _send_state(self):
"""
Retrieve the current send framer state.
"""
return self._framers[-1].send_state
@property
def _recv_framer(self):
"""
Retrieve the current receive framer.
"""
return self._framers[-1].recv
@property
def _recv_state(self):
"""
Retrieve the current receive framer state.
"""
return self._framers[-1].recv_state
|
klmitch/framer | framer/framers.py | IdentityFramer.to_frame | python | def to_frame(self, data, state):
# Convert the data to bytes
frame = six.binary_type(data)
# Clear the buffer
del data[:]
# Return the frame
return frame | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L225-L249 | null | class IdentityFramer(Framer):
"""
The identity framer passes received data straight through. It is
the simplest example of a framer.
For this framer, frames are ``bytes``.
"""
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Ensure the frame is in bytes
return six.binary_type(frame)
|
klmitch/framer | framer/framers.py | ChunkFramer.to_frame | python | def to_frame(self, data, state):
# If we've read all the data, let the caller know
if state.chunk_remaining <= 0:
raise exc.NoFrames()
# OK, how much data do we send on?
data_len = min(state.chunk_remaining, len(data))
# Extract that data from the buffer
frame = six.binary_type(data[:data_len])
del data[:data_len]
# Update the state
state.chunk_remaining -= data_len
# Return the frame
return frame | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L302-L334 | null | class ChunkFramer(IdentityFramer):
"""
The chunk framer passes a limited amount of data straight through.
It is intended to be used for short-term streaming: initialize it
with the amount of data to be received, push it onto the framer
stack, then pop the stack when all of the data has been received.
For this framer, frames are ``bytes``.
"""
def __init__(self, chunk_len):
"""
Initialize a ``ChunkFramer`` object.
:param chunk_len: The amount of data to pass through.
"""
super(ChunkFramer, self).__init__()
self.chunk_len = chunk_len
def initialize_state(self, state):
"""
Initialize a ``FramerState`` object. This state will be
passed in to the ``to_frame()`` and ``to_bytes()`` methods,
and may be used for processing partial frames or cross-frame
information. The default implementation does nothing.
:param state: The state to initialize.
"""
state.chunk_remaining = self.chunk_len
|
klmitch/framer | framer/framers.py | LineFramer.to_frame | python | def to_frame(self, data, state):
# Find the next newline
data_len = data.find(b'\n')
if data_len < 0:
# No line to extract
raise exc.NoFrames()
# Track how much to exclude
frame_len = data_len + 1
# Are we to exclude carriage returns?
if (self.carriage_return and data_len and
data[data_len - 1] == ord(b'\r')):
data_len -= 1
# Extract the frame
frame = six.binary_type(data[:data_len])
del data[:frame_len]
# Return the frame
return frame | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L363-L400 | null | class LineFramer(Framer):
"""
The line framer extracts lines as frames. Lines are delimited by
newlines or carriage return/newline pairs. The line endings are
stripped off.
For this framer, frames are ``bytes``.
"""
def __init__(self, carriage_return=True):
"""
Initialize a ``LineFramer`` object.
:param carriage_return: If ``True`` (the default), accept
carriage return/newline pairs as line
separators. Also causes carriage
returns to be emitted. If ``False``,
carriage returns are not stripped from
input and not emitted on output.
"""
super(LineFramer, self).__init__()
self.carriage_return = carriage_return
self.line_end = b'\r\n' if carriage_return else b'\n'
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Ensure the frame is in bytes and append the delimiter
return six.binary_type(frame) + self.line_end
|
klmitch/framer | framer/framers.py | LengthEncodedFramer.to_frame | python | def to_frame(self, data, state):
# First, determine the length we're looking for
length = state.length
if length is None:
# Try decoding a length from the data buffer
length = self.decode_length(data, state)
# Now, is there enough data?
if len(data) < length:
state.length = length
raise exc.NoFrames()
# Extract the frame
frame = six.binary_type(data[:length])
del data[:length]
# Update the state
state.length = None
# Return the frame
return frame | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L445-L481 | [
"def decode_length(self, data, state):\n \"\"\"\n Extract and decode a frame length from the data buffer. The\n consumed data should be removed from the buffer. If the\n length data is incomplete, must raise a ``NoFrames``\n exception.\n\n :param data: A ``bytearray`` instance containing the data so\n far read.\n :param state: An instance of ``FramerState``. If the buffer\n contains a partial encoded length, this object\n can be used to store state information to allow\n the remainder of the length to be read.\n\n :returns: The frame length, as an integer.\n \"\"\"\n\n pass # pragma: no cover\n",
"def decode_length(self, data, state):\n self._calls.append(('decode_length', data, state))\n if len(data) < 4:\n raise exc.NoFrames()\n length = int(data[:4])\n del data[:4]\n return length\n"
] | class LengthEncodedFramer(Framer):
"""
Many protocols encode their frames by prefixing the frame with the
encoded frame length. This abstract framer is the base class for
such framers. Most such framers can be implemented using
``StructFramer``, but other framers with unusual length encodings
can be implemented by extending this framer and implementing the
``encode_length()`` and ``decode_length()`` methods.
For this framer, frames are ``bytes``.
"""
def initialize_state(self, state):
"""
Initialize a ``FramerState`` object. This state will be
passed in to the ``to_frame()`` and ``to_bytes()`` methods,
and may be used for processing partial frames or cross-frame
information. The default implementation does nothing.
:param state: The state to initialize.
"""
# Signal that a length must be decoded
state.length = None
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Generate the bytes from the frame
frame = six.binary_type(frame)
return self.encode_length(frame, state) + frame
@abc.abstractmethod
def encode_length(self, frame, state):
"""
Encode the length of the specified frame into a sequence of
bytes. The frame will be appended to the byte sequence for
transmission.
:param frame: The frame to encode the length of. Should be a
``bytes`` object.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: The frame length, encoded into a sequence of bytes.
"""
pass # pragma: no cover
@abc.abstractmethod
def decode_length(self, data, state):
"""
Extract and decode a frame length from the data buffer. The
consumed data should be removed from the buffer. If the
length data is incomplete, must raise a ``NoFrames``
exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial encoded length, this object
can be used to store state information to allow
the remainder of the length to be read.
:returns: The frame length, as an integer.
"""
pass # pragma: no cover
|
klmitch/framer | framer/framers.py | LengthEncodedFramer.to_bytes | python | def to_bytes(self, frame, state):
# Generate the bytes from the frame
frame = six.binary_type(frame)
return self.encode_length(frame, state) + frame | Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L483-L499 | [
"def encode_length(self, frame, state):\n \"\"\"\n Encode the length of the specified frame into a sequence of\n bytes. The frame will be appended to the byte sequence for\n transmission.\n\n :param frame: The frame to encode the length of. Should be a\n ``bytes`` object.\n :param state: An instance of ``FramerState``. This object may\n be used to track information across calls to the\n method.\n\n :returns: The frame length, encoded into a sequence of bytes.\n \"\"\"\n\n pass # pragma: no cover\n",
"def encode_length(self, frame, state):\n self._calls.append(('encode_length', frame, state))\n return ('%04d' % len(frame)).encode('utf-8')\n"
] | class LengthEncodedFramer(Framer):
"""
Many protocols encode their frames by prefixing the frame with the
encoded frame length. This abstract framer is the base class for
such framers. Most such framers can be implemented using
``StructFramer``, but other framers with unusual length encodings
can be implemented by extending this framer and implementing the
``encode_length()`` and ``decode_length()`` methods.
For this framer, frames are ``bytes``.
"""
def initialize_state(self, state):
"""
Initialize a ``FramerState`` object. This state will be
passed in to the ``to_frame()`` and ``to_bytes()`` methods,
and may be used for processing partial frames or cross-frame
information. The default implementation does nothing.
:param state: The state to initialize.
"""
# Signal that a length must be decoded
state.length = None
def to_frame(self, data, state):
"""
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
"""
# First, determine the length we're looking for
length = state.length
if length is None:
# Try decoding a length from the data buffer
length = self.decode_length(data, state)
# Now, is there enough data?
if len(data) < length:
state.length = length
raise exc.NoFrames()
# Extract the frame
frame = six.binary_type(data[:length])
del data[:length]
# Update the state
state.length = None
# Return the frame
return frame
@abc.abstractmethod
def encode_length(self, frame, state):
"""
Encode the length of the specified frame into a sequence of
bytes. The frame will be appended to the byte sequence for
transmission.
:param frame: The frame to encode the length of. Should be a
``bytes`` object.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: The frame length, encoded into a sequence of bytes.
"""
pass # pragma: no cover
@abc.abstractmethod
def decode_length(self, data, state):
"""
Extract and decode a frame length from the data buffer. The
consumed data should be removed from the buffer. If the
length data is incomplete, must raise a ``NoFrames``
exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial encoded length, this object
can be used to store state information to allow
the remainder of the length to be read.
:returns: The frame length, as an integer.
"""
pass # pragma: no cover
|
klmitch/framer | framer/framers.py | StructFramer.decode_length | python | def decode_length(self, data, state):
# Do we have enough data yet?
if len(data) < self.fmt.size:
raise exc.NoFrames()
# Extract the length
length = self.fmt.unpack(six.binary_type(data[:self.fmt.size]))[0]
del data[:self.fmt.size]
# Return the length
return length | Extract and decode a frame length from the data buffer. The
consumed data should be removed from the buffer. If the
length data is incomplete, must raise a ``NoFrames``
exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial encoded length, this object
can be used to store state information to allow
the remainder of the length to be read.
:returns: The frame length, as an integer. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L600-L626 | null | class StructFramer(LengthEncodedFramer):
"""
A subclass of ``LengthEncodedFramer`` which encodes the frame
length using a format string acceptable to the standard Python
``struct.Struct`` class.
For this framer, frames are ``bytes``.
"""
def __init__(self, fmt):
"""
Initialize a ``StructFramer`` object.
:param fmt: The ``struct``-compliant format string for the
integer length of the frame.
"""
# Sanity-check the fmt
fmt_chr = None
for c in fmt:
if c in '@=<>!x':
# Modifiers and pads we can ignore
continue
if c in 'bBhHiIlLqQ':
if fmt_chr:
raise ValueError("too many specifiers in format")
fmt_chr = c
continue
# Invalid specifier for a length
raise ValueError("unrecognized specifier in format")
if not fmt_chr:
# Must have *some* conversion!
raise ValueError("no recognized specifier in format")
super(StructFramer, self).__init__()
# Save the format
self.fmt = struct.Struct(fmt)
def encode_length(self, frame, state):
"""
Encode the length of the specified frame into a sequence of
bytes. The frame will be appended to the byte sequence for
transmission.
:param frame: The frame to encode the length of. Should be a
``bytes`` object.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: The frame length, encoded into a sequence of bytes.
"""
# Pack the frame length
return self.fmt.pack(len(frame))
|
klmitch/framer | framer/framers.py | StuffingFramer.to_frame | python | def to_frame(self, data, state):
# Find the next packet start
if not state.frame_start:
# Find the begin sequence
idx = data.find(self.begin)
if idx < 0:
# Couldn't find one
raise exc.NoFrames()
# Excise the begin sequence
del data[:idx + len(self.begin)]
# Now see if we can find the end sequence
idx = data.find(self.end)
if idx < 0:
# We've found the start, but don't have the end yet
state.frame_start = True
raise exc.NoFrames()
# Extract the frame
frame = six.binary_type(data[:idx])
del data[:idx + len(self.end)]
# Update the state
state.frame_start = False
# Unstuff the frame and return it
return self.prefix.join(frame.split(self.nop)) | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L700-L743 | null | class StuffingFramer(Framer):
"""
Some protocols encode their frames using synchronization bytes--a
sequence of bytes that signal the beginning and end of a frame,
allowing for synchronization if a decoding error is encountered.
However, the synchronization sequence could occur within the
frame, so these protocols *stuff* a throw-away byte into such
sequences to prevent them from accidentally ending a frame or
starting another frame--and thus the name "byte stuffing."
For this framer, frames are ``bytes``.
"""
def __init__(self, begin=b'\xff\xff\xff\xff\xff',
end=b'\xff\xff\xff\xff\xfe', nop=b'\xff\xff\xff\xff\x00'):
"""
Initialize a ``StuffingFramer`` object.
:param begin: A sequence of bytes which, when encountered,
indicates the beginning of a frame. Must be the
same length as ``end`` and ``nop``, and all
arguments must have a common prefix.
:param end: A sequence of bytes which, when encountered,
indicates the end of a frame. Must be the same
length as ``begin`` and ``nop``, and all arguments
must have a common prefix.
:param nop: A sequence of bytes which, when encountered, is
thrown away. Used to interrupt sequences internal
to the frame which could be mistaken for the
beginning or ending of the frame. Must be the
same length as ``begin`` and ``end``, and all
arguments must have a common prefix.
"""
super(StuffingFramer, self).__init__()
# Make sure begin, end, and nop are binary types
self.begin = six.binary_type(begin)
self.end = six.binary_type(end)
self.nop = six.binary_type(nop)
# Determine the prefix
self.prefix = os.path.commonprefix([self.begin, self.end, self.nop])
# Do a little sanity-checking
if not self.begin or not self.end or not self.nop:
raise ValueError("no arguments may be empty")
elif not self.prefix:
raise ValueError("arguments have no common prefix")
elif (len(self.begin) != len(self.end) or
len(self.begin) != len(self.nop)):
raise ValueError("arguments must be the same length")
elif self.nop == self.begin or self.nop == self.end:
raise ValueError("nop must be distinct from begin and end")
elif self.begin == self.end:
raise ValueError("begin and end must be distinct")
def initialize_state(self, state):
"""
Initialize a ``FramerState`` object. This state will be
passed in to the ``to_frame()`` and ``to_bytes()`` methods,
and may be used for processing partial frames or cross-frame
information. The default implementation does nothing.
:param state: The state to initialize.
"""
# Signal that a start frame sequence has not been encountered
# yet
state.frame_start = False
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Generate and return the frame
return (self.begin +
self.nop.join(six.binary_type(frame).split(self.prefix)) +
self.end)
|
klmitch/framer | framer/framers.py | StuffingFramer.to_bytes | python | def to_bytes(self, frame, state):
# Generate and return the frame
return (self.begin +
self.nop.join(six.binary_type(frame).split(self.prefix)) +
self.end) | Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L745-L762 | null | class StuffingFramer(Framer):
"""
Some protocols encode their frames using synchronization bytes--a
sequence of bytes that signal the beginning and end of a frame,
allowing for synchronization if a decoding error is encountered.
However, the synchronization sequence could occur within the
frame, so these protocols *stuff* a throw-away byte into such
sequences to prevent them from accidentally ending a frame or
starting another frame--and thus the name "byte stuffing."
For this framer, frames are ``bytes``.
"""
def __init__(self, begin=b'\xff\xff\xff\xff\xff',
             end=b'\xff\xff\xff\xff\xfe', nop=b'\xff\xff\xff\xff\x00'):
    """Create a ``StuffingFramer``.

    ``begin``, ``end`` and ``nop`` must all be the same length and
    share a common, non-empty prefix; ``begin`` and ``end`` must be
    distinct from each other and from ``nop``.

    :param begin: Byte sequence marking the start of a frame.
    :param end: Byte sequence marking the end of a frame.
    :param nop: Throw-away byte sequence stuffed into payloads to
                break up accidental marker sequences.
    :raises ValueError: If the marker sequences violate the rules
                        above.
    """
    super(StuffingFramer, self).__init__()
    # Normalize all three marker sequences to binary strings.
    self.begin = six.binary_type(begin)
    self.end = six.binary_type(end)
    self.nop = six.binary_type(nop)
    # The shared prefix is the part that gets "stuffed" in payloads.
    self.prefix = os.path.commonprefix([self.begin, self.end, self.nop])
    # Guard clauses: reject marker sets that cannot frame
    # unambiguously.  Order matters: emptiness is checked before the
    # prefix, matching the error users see for empty arguments.
    if not (self.begin and self.end and self.nop):
        raise ValueError("no arguments may be empty")
    if not self.prefix:
        raise ValueError("arguments have no common prefix")
    if len(self.begin) != len(self.end) or len(self.begin) != len(self.nop):
        raise ValueError("arguments must be the same length")
    if self.nop in (self.begin, self.end):
        raise ValueError("nop must be distinct from begin and end")
    if self.begin == self.end:
        raise ValueError("begin and end must be distinct")
def initialize_state(self, state):
    """Reset *state* for a new stream.

    The only cross-call information this framer keeps is whether a
    begin marker has been seen without a matching end marker yet.

    :param state: The ``FramerState`` instance to initialize.
    """
    # No begin sequence has been encountered so far.
    state.frame_start = False
def to_frame(self, data, state):
    """Extract one unstuffed frame from *data*, consuming its bytes.

    :param data: A ``bytearray`` holding the data read so far;
                 consumed bytes are removed in place.
    :param state: A ``FramerState`` used to remember that a begin
                  marker was seen in an earlier call.
    :returns: The frame as ``bytes``.
    :raises NoFrames: If no complete frame is available yet.
    """
    if not state.frame_start:
        # Look for the begin marker first.
        pos = data.find(self.begin)
        if pos < 0:
            raise exc.NoFrames()
        # Discard everything up to and including the begin marker.
        del data[:pos + len(self.begin)]
    pos = data.find(self.end)
    if pos < 0:
        # Begin seen but the end marker has not arrived yet; remember
        # that so the next call skips the begin search.
        state.frame_start = True
        raise exc.NoFrames()
    frame = six.binary_type(data[:pos])
    del data[:pos + len(self.end)]
    state.frame_start = False
    # Un-stuff: each nop sequence stands in for the raw prefix bytes.
    return self.prefix.join(frame.split(self.nop))
|
klmitch/framer | framer/framers.py | COBSFramer.to_frame | python | def to_frame(self, data, state):
# Find the next null byte
data_len = data.find(b'\0')
if data_len < 0:
# No full frame yet
raise exc.NoFrames()
# Track how much to exclude
frame_len = data_len + 1
# Decode the data
frame = six.binary_type(self.variant.decode(
six.binary_type(data[:data_len])))
del data[:frame_len]
# Return the frame
return frame | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L803-L836 | null | class COBSFramer(Framer):
"""
Some protocols encode their frames using synchronization bytes,
like ``StuffingFramer``, but the byte stuffing protocol is
Consistent Overhead Byte Stuffing, or COBS. This framer
implements framing where the end of a frame is delimited by the
null character (which never appears in a COBS-encoded frame).
For this framer, frames are ``bytes``.
"""
# Values to pass for the variant parameter of __init__()
VARIANT_COBS = cobs
VARIANT_COBSR = cobsr
def __init__(self, variant=VARIANT_COBS):
    """Create a ``COBSFramer``.

    :param variant: The COBS implementation to use: either
                    ``COBSFramer.VARIANT_COBS`` (the default, standard
                    COBS) or ``COBSFramer.VARIANT_COBSR`` (COBS/R).
                    Any object exposing ``encode()`` and ``decode()``
                    that take and return ``bytes`` also works, as long
                    as ``encode()`` never emits the null byte.
    """
    super(COBSFramer, self).__init__()
    # The variant supplies the encode()/decode() pair used by
    # to_bytes() and to_frame().
    self.variant = variant
def to_bytes(self, frame, state):
    """Serialize a single frame for transmission on the stream.

    :param frame: The frame (``bytes``) to serialize.
    :param state: A ``FramerState`` instance; unused by this framer.
    :returns: The COBS-encoded frame followed by a null delimiter.
    """
    encoded = self.variant.encode(six.binary_type(frame))
    # Frames are delimited by a trailing null byte, which COBS
    # guarantees never appears inside the encoded payload.
    return six.binary_type(encoded) + b'\0'
|
klmitch/framer | framer/framers.py | COBSFramer.to_bytes | python | def to_bytes(self, frame, state):
# Encode the frame and append the delimiter
return six.binary_type(self.variant.encode(
six.binary_type(frame))) + b'\0' | Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream. | train | https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L838-L854 | null | class COBSFramer(Framer):
"""
Some protocols encode their frames using synchronization bytes,
like ``StuffingFramer``, but the byte stuffing protocol is
Consistent Overhead Byte Stuffing, or COBS. This framer
implements framing where the end of a frame is delimited by the
null character (which never appears in a COBS-encoded frame).
For this framer, frames are ``bytes``.
"""
# Values to pass for the variant parameter of __init__()
VARIANT_COBS = cobs
VARIANT_COBSR = cobsr
def __init__(self, variant=VARIANT_COBS):
"""
Initialize a ``COBSFramer`` object.
:param variant: Select the COBS variant. Valid values are
``framer.COBSFramer.VARIANT_COBS`` (the
default) for standard COBS; and
``framer.COBSFramer.VARIANT_COBSR`` for
COBS/R, an invention of the author of the
underlying ``cobs`` Python package. It is
also possible to pass any object which has
``encode()`` and ``decode()`` functions or
methods; each should take a single ``bytes``
argument and return ``bytes``, and the value
returned by ``encode()`` must not contain the
null character ("\0").
"""
super(COBSFramer, self).__init__()
# Select the variant we're using
self.variant = variant
def to_frame(self, data, state):
    """Pop one COBS-decoded frame off the front of *data*.

    :param data: A ``bytearray`` holding the data read so far;
                 consumed bytes are removed in place.
    :param state: A ``FramerState`` instance; unused by this framer.
    :returns: The decoded frame as ``bytes``.
    :raises NoFrames: If the buffer contains no null delimiter yet.
    """
    delim = data.find(b'\0')
    if delim < 0:
        # No frame delimiter in the buffer yet.
        raise exc.NoFrames()
    # Decode before consuming, so a decode error leaves the buffer
    # untouched; then drop the frame plus its null delimiter.
    frame = six.binary_type(self.variant.decode(
        six.binary_type(data[:delim])))
    del data[:delim + 1]
    return frame
|
citruz/beacontools | beacontools/scanner.py | Monitor.run | python | def run(self):
self.socket = self.bluez.hci_open_dev(self.bt_device_id)
filtr = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(filtr)
self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
self.set_scan_parameters()
self.toggle_scan(True)
while self.keep_going:
pkt = self.socket.recv(255)
event = to_int(pkt[1])
subevent = to_int(pkt[3])
if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
# we have an BLE advertisement
self.process_packet(pkt)
self.socket.close() | Continously scan for BLE advertisements. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L89-L108 | [
"def to_int(string):\n \"\"\"Convert a one element byte string to int for python 2 support.\"\"\"\n if isinstance(string, str):\n return ord(string[0])\n else:\n return string\n",
"def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,\n address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):\n \"\"\"\"sets the le scan parameters\n\n Args:\n scan_type: ScanType.(PASSIVE|ACTIVE)\n interval: ms (as float) between scans (valid range 2.5ms - 10240ms)\n ..note:: when interval and window are equal, the scan\n runs continuos\n window: ms (as float) scan duration (valid range 2.5ms - 10240ms)\n address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)\n * PUBLIC = use device MAC address\n * RANDOM = generate a random MAC address and use that\n filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will\n return all fetched bluetooth packets (WHITELIST_ONLY is not supported,\n because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)\n\n Raises:\n ValueError: A value had an unexpected format or was not in range\n \"\"\"\n interval_fractions = interval_ms / MS_FRACTION_DIVIDER\n if interval_fractions < 0x0004 or interval_fractions > 0x4000:\n raise ValueError(\n \"Invalid interval given {}, must be in range of 2.5ms to 10240ms!\".format(\n interval_fractions))\n window_fractions = window_ms / MS_FRACTION_DIVIDER\n if window_fractions < 0x0004 or window_fractions > 0x4000:\n raise ValueError(\n \"Invalid window given {}, must be in range of 2.5ms to 10240ms!\".format(\n window_fractions))\n\n interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)\n\n scan_parameter_pkg = struct.pack(\n \">BHHBB\",\n scan_type,\n interval_fractions,\n window_fractions,\n address_type,\n filter_type)\n self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,\n scan_parameter_pkg)\n",
"def toggle_scan(self, enable, filter_duplicates=False):\n \"\"\"Enables or disables BLE scanning\n\n Args:\n enable: boolean value to enable (True) or disable (False) scanner\n filter_duplicates: boolean value to enable/disable filter, that\n omits duplicated packets\"\"\"\n command = struct.pack(\">BB\", enable, filter_duplicates)\n self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)\n",
"def process_packet(self, pkt):\n \"\"\"Parse the packet and call callback if one of the filters matches.\"\"\"\n\n # check if this could be a valid packet before parsing\n # this reduces the CPU load significantly\n if not ( \\\n ((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b\"\\x4c\\x00\\x02\\x15\")) or \\\n ((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b\"\\xaa\\xfe\")) or \\\n ((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b\"\\x9a\\xfe\"))):\n return\n\n bt_addr = bt_addr_to_string(pkt[7:13])\n rssi = bin_to_int(pkt[-1])\n # strip bluetooth address and parse packet\n packet = parse_packet(pkt[14:-1])\n\n # return if packet was not an beacon advertisement\n if not packet:\n return\n\n # we need to remeber which eddystone beacon has which bt address\n # because the TLM and URL frames do not contain the namespace and instance\n self.save_bt_addr(packet, bt_addr)\n # properties holds the identifying information for a beacon\n # e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon\n properties = self.get_properties(packet, bt_addr)\n\n if self.device_filter is None and self.packet_filter is None:\n # no filters selected\n self.callback(bt_addr, rssi, packet, properties)\n\n elif self.device_filter is None:\n # filter by packet type\n if is_one_of(packet, self.packet_filter):\n self.callback(bt_addr, rssi, packet, properties)\n else:\n # filter by device and packet type\n if self.packet_filter and not is_one_of(packet, self.packet_filter):\n # return if packet filter does not match\n return\n\n # iterate over filters and call .matches() on each\n for filtr in self.device_filter:\n if isinstance(filtr, BtAddrFilter):\n if filtr.matches({'bt_addr':bt_addr}):\n self.callback(bt_addr, rssi, packet, properties)\n return\n\n elif filtr.matches(properties):\n self.callback(bt_addr, rssi, packet, properties)\n return\n"
] | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
    """Construct the scanner thread.

    :param callback: Called as ``callback(bt_addr, rssi, packet,
                     properties)`` for every matching advertisement.
    :param bt_device_id: Number of the bluetooth device (hciX).
    :param device_filter: Filters restricting which beacons are
                          reported, or None for all.
    :param packet_filter: Packet-type filters, or None for all.
    """
    # Import bluez lazily so the package remains usable in
    # parsing-only mode where bluez is not installed.
    self.bluez = import_module('bluetooth._bluetooth')
    threading.Thread.__init__(self)
    self.daemon = False
    self.keep_going = True
    self.callback = callback
    self.bt_device_id = bt_device_id      # number of the bt device (hciX)
    self.device_filter = device_filter    # beacons to monitor
    self.mode = get_mode(device_filter)
    self.packet_filter = packet_filter    # packet types to monitor
    self.socket = None                    # bluetooth socket, opened in run()
    # Eddystone beacon <-> bt address mapping, filled by save_bt_addr().
    self.eddystone_mappings = []
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
                        address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
    """Send the LE Set Scan Parameters HCI command.

    :param scan_type: ScanType.(PASSIVE|ACTIVE)
    :param interval_ms: ms (float) between scans, valid range
                        2.5ms - 10240ms; equal interval and window
                        means continuous scanning
    :param window_ms: ms (float) scan duration, valid range
                      2.5ms - 10240ms
    :param address_type: BluetoothAddressType.(PUBLIC|RANDOM);
                         PUBLIC uses the device MAC, RANDOM a
                         generated one
    :param filter_type: ScanFilter.(ALL|WHITELIST_ONLY); only ALL is
                        supported since the whitelist command is not
                        implemented
    :raises ValueError: If interval or window is out of range.
    """
    # HCI expresses both values in 0.625ms fractions.
    interval_fractions = interval_ms / MS_FRACTION_DIVIDER
    if not 0x0004 <= interval_fractions <= 0x4000:
        raise ValueError(
            "Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
                interval_fractions))
    window_fractions = window_ms / MS_FRACTION_DIVIDER
    if not 0x0004 <= window_fractions <= 0x4000:
        raise ValueError(
            "Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
                window_fractions))
    scan_parameter_pkg = struct.pack(
        ">BHHBB",
        scan_type,
        int(interval_fractions),
        int(window_fractions),
        address_type,
        filter_type)
    self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
                            scan_parameter_pkg)
def toggle_scan(self, enable, filter_duplicates=False):
    """Send the LE Set Scan Enable HCI command.

    :param enable: True to start scanning, False to stop.
    :param filter_duplicates: True to let the controller drop
                              duplicated packets.
    """
    payload = struct.pack(">BB", enable, filter_duplicates)
    self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, payload)
def process_packet(self, pkt):
    """Parse an HCI advertising packet and invoke the callback when the
    configured device/packet filters match."""
    # Cheap byte-level check before full parsing; this skips the
    # parser for advertisements we are not scanning for and reduces
    # CPU load significantly.
    looks_ibeacon = (self.mode & ScannerMode.MODE_IBEACON) and \
        pkt[19:23] == b"\x4c\x00\x02\x15"
    looks_eddystone = (self.mode & ScannerMode.MODE_EDDYSTONE) and \
        pkt[19:21] == b"\xaa\xfe"
    looks_estimote = (self.mode & ScannerMode.MODE_ESTIMOTE) and \
        pkt[19:21] == b"\x9a\xfe"
    if not (looks_ibeacon or looks_eddystone or looks_estimote):
        return

    bt_addr = bt_addr_to_string(pkt[7:13])
    rssi = bin_to_int(pkt[-1])
    # Strip the bluetooth address and parse the payload; parse_packet
    # returns a falsy value for non-beacon advertisements.
    packet = parse_packet(pkt[14:-1])
    if not packet:
        return

    # Remember which Eddystone beacon uses which bt address, because
    # TLM and URL frames do not carry namespace/instance themselves.
    self.save_bt_addr(packet, bt_addr)
    # Identifying information: instance/namespace for Eddystone,
    # uuid/major/minor for iBeacon.
    properties = self.get_properties(packet, bt_addr)

    if self.device_filter is None and self.packet_filter is None:
        # No filters selected: report everything.
        self.callback(bt_addr, rssi, packet, properties)
        return
    if self.device_filter is None:
        # Filter by packet type only.
        if is_one_of(packet, self.packet_filter):
            self.callback(bt_addr, rssi, packet, properties)
        return
    # Filter by device (and optionally packet type).
    if self.packet_filter and not is_one_of(packet, self.packet_filter):
        return
    for filtr in self.device_filter:
        if isinstance(filtr, BtAddrFilter):
            matched = filtr.matches({'bt_addr': bt_addr})
        else:
            matched = filtr.matches(properties)
        if matched:
            self.callback(bt_addr, rssi, packet, properties)
            return
def save_bt_addr(self, packet, bt_addr):
    """Record the bt address <-> Eddystone identity mapping carried by
    a UID frame; other frame types are ignored."""
    if isinstance(packet, EddystoneUIDFrame):
        # Drop any stale mapping for this address, then append the
        # fresh one.
        kept = [m for m in self.eddystone_mappings if m[0] != bt_addr]
        kept.append((bt_addr, packet.properties))
        self.eddystone_mappings = kept
def get_properties(self, packet, bt_addr):
    """Return identifying properties for the beacon that sent *packet*."""
    identityless = [EddystoneTLMFrame, EddystoneURLFrame,
                    EddystoneEncryptedTLMFrame, EddystoneEIDFrame]
    if is_one_of(packet, identityless):
        # These frame types omit namespace/instance; recover them from
        # the mapping recorded for this bt address.
        return self.properties_from_mapping(bt_addr)
    return packet.properties
def properties_from_mapping(self, bt_addr):
    """Return the stored (namespace, instance) properties for
    *bt_addr*, or None if no UID frame has been seen from it."""
    return next(
        (props for addr, props in self.eddystone_mappings if addr == bt_addr),
        None)
def terminate(self):
    """Stop scanning, tell the runner loop to exit, and wait for the
    thread to finish."""
    self.toggle_scan(False)
    self.keep_going = False
    self.join()
|
citruz/beacontools | beacontools/scanner.py | Monitor.set_scan_parameters | python | def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
"
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg) | sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L110-L151 | null | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
"""Construct interface object."""
# do import here so that the package can be used in parsing-only mode (no bluez required)
self.bluez = import_module('bluetooth._bluetooth')
threading.Thread.__init__(self)
self.daemon = False
self.keep_going = True
self.callback = callback
# number of the bt device (hciX)
self.bt_device_id = bt_device_id
# list of beacons to monitor
self.device_filter = device_filter
self.mode = get_mode(device_filter)
# list of packet types to monitor
self.packet_filter = packet_filter
# bluetooth socket
self.socket = None
# keep track of Eddystone Beacon <-> bt addr mapping
self.eddystone_mappings = []
def run(self):
    """Open the HCI socket and scan for BLE advertisements until
    terminate() clears the keep_going flag."""
    self.socket = self.bluez.hci_open_dev(self.bt_device_id)
    # Ask the kernel to deliver every HCI event packet to this socket.
    flt = self.bluez.hci_filter_new()
    self.bluez.hci_filter_all_events(flt)
    self.bluez.hci_filter_set_ptype(flt, self.bluez.HCI_EVENT_PKT)
    self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, flt)
    self.set_scan_parameters()
    self.toggle_scan(True)
    while self.keep_going:
        pkt = self.socket.recv(255)
        # Only LE meta / advertising-report events carry beacon data.
        if to_int(pkt[1]) == LE_META_EVENT and \
                to_int(pkt[3]) == EVT_LE_ADVERTISING_REPORT:
            self.process_packet(pkt)
    self.socket.close()
def toggle_scan(self, enable, filter_duplicates=False):
"""Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets"""
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
def process_packet(self, pkt):
"""Parse the packet and call callback if one of the filters matches."""
# check if this could be a valid packet before parsing
# this reduces the CPU load significantly
if not ( \
((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \
((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \
((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))):
return
bt_addr = bt_addr_to_string(pkt[7:13])
rssi = bin_to_int(pkt[-1])
# strip bluetooth address and parse packet
packet = parse_packet(pkt[14:-1])
# return if packet was not an beacon advertisement
if not packet:
return
# we need to remeber which eddystone beacon has which bt address
# because the TLM and URL frames do not contain the namespace and instance
self.save_bt_addr(packet, bt_addr)
# properties holds the identifying information for a beacon
# e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon
properties = self.get_properties(packet, bt_addr)
if self.device_filter is None and self.packet_filter is None:
# no filters selected
self.callback(bt_addr, rssi, packet, properties)
elif self.device_filter is None:
# filter by packet type
if is_one_of(packet, self.packet_filter):
self.callback(bt_addr, rssi, packet, properties)
else:
# filter by device and packet type
if self.packet_filter and not is_one_of(packet, self.packet_filter):
# return if packet filter does not match
return
# iterate over filters and call .matches() on each
for filtr in self.device_filter:
if isinstance(filtr, BtAddrFilter):
if filtr.matches({'bt_addr':bt_addr}):
self.callback(bt_addr, rssi, packet, properties)
return
elif filtr.matches(properties):
self.callback(bt_addr, rssi, packet, properties)
return
def save_bt_addr(self, packet, bt_addr):
"""Add to the list of mappings."""
if isinstance(packet, EddystoneUIDFrame):
# remove out old mapping
new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr]
new_mappings.append((bt_addr, packet.properties))
self.eddystone_mappings = new_mappings
def get_properties(self, packet, bt_addr):
"""Get properties of beacon depending on type."""
if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
# here we retrieve the namespace and instance which corresponds to the
# eddystone beacon with this bt address
return self.properties_from_mapping(bt_addr)
else:
return packet.properties
def properties_from_mapping(self, bt_addr):
"""Retrieve properties (namespace, instance) for the specified bt address."""
for addr, properties in self.eddystone_mappings:
if addr == bt_addr:
return properties
return None
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join()
|
citruz/beacontools | beacontools/scanner.py | Monitor.toggle_scan | python | def toggle_scan(self, enable, filter_duplicates=False):
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command) | Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L153-L161 | null | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
"""Construct interface object."""
# do import here so that the package can be used in parsing-only mode (no bluez required)
self.bluez = import_module('bluetooth._bluetooth')
threading.Thread.__init__(self)
self.daemon = False
self.keep_going = True
self.callback = callback
# number of the bt device (hciX)
self.bt_device_id = bt_device_id
# list of beacons to monitor
self.device_filter = device_filter
self.mode = get_mode(device_filter)
# list of packet types to monitor
self.packet_filter = packet_filter
# bluetooth socket
self.socket = None
# keep track of Eddystone Beacon <-> bt addr mapping
self.eddystone_mappings = []
def run(self):
"""Continously scan for BLE advertisements."""
self.socket = self.bluez.hci_open_dev(self.bt_device_id)
filtr = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(filtr)
self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
self.set_scan_parameters()
self.toggle_scan(True)
while self.keep_going:
pkt = self.socket.recv(255)
event = to_int(pkt[1])
subevent = to_int(pkt[3])
if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
# we have an BLE advertisement
self.process_packet(pkt)
self.socket.close()
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg)
def process_packet(self, pkt):
"""Parse the packet and call callback if one of the filters matches."""
# check if this could be a valid packet before parsing
# this reduces the CPU load significantly
if not ( \
((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \
((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \
((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))):
return
bt_addr = bt_addr_to_string(pkt[7:13])
rssi = bin_to_int(pkt[-1])
# strip bluetooth address and parse packet
packet = parse_packet(pkt[14:-1])
# return if packet was not an beacon advertisement
if not packet:
return
# we need to remeber which eddystone beacon has which bt address
# because the TLM and URL frames do not contain the namespace and instance
self.save_bt_addr(packet, bt_addr)
# properties holds the identifying information for a beacon
# e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon
properties = self.get_properties(packet, bt_addr)
if self.device_filter is None and self.packet_filter is None:
# no filters selected
self.callback(bt_addr, rssi, packet, properties)
elif self.device_filter is None:
# filter by packet type
if is_one_of(packet, self.packet_filter):
self.callback(bt_addr, rssi, packet, properties)
else:
# filter by device and packet type
if self.packet_filter and not is_one_of(packet, self.packet_filter):
# return if packet filter does not match
return
# iterate over filters and call .matches() on each
for filtr in self.device_filter:
if isinstance(filtr, BtAddrFilter):
if filtr.matches({'bt_addr':bt_addr}):
self.callback(bt_addr, rssi, packet, properties)
return
elif filtr.matches(properties):
self.callback(bt_addr, rssi, packet, properties)
return
def save_bt_addr(self, packet, bt_addr):
"""Add to the list of mappings."""
if isinstance(packet, EddystoneUIDFrame):
# remove out old mapping
new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr]
new_mappings.append((bt_addr, packet.properties))
self.eddystone_mappings = new_mappings
def get_properties(self, packet, bt_addr):
"""Get properties of beacon depending on type."""
if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
# here we retrieve the namespace and instance which corresponds to the
# eddystone beacon with this bt address
return self.properties_from_mapping(bt_addr)
else:
return packet.properties
def properties_from_mapping(self, bt_addr):
"""Retrieve properties (namespace, instance) for the specified bt address."""
for addr, properties in self.eddystone_mappings:
if addr == bt_addr:
return properties
return None
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join()
|
citruz/beacontools | beacontools/scanner.py | Monitor.process_packet | python | def process_packet(self, pkt):
# check if this could be a valid packet before parsing
# this reduces the CPU load significantly
if not ( \
((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \
((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \
((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))):
return
bt_addr = bt_addr_to_string(pkt[7:13])
rssi = bin_to_int(pkt[-1])
# strip bluetooth address and parse packet
packet = parse_packet(pkt[14:-1])
# return if packet was not an beacon advertisement
if not packet:
return
# we need to remeber which eddystone beacon has which bt address
# because the TLM and URL frames do not contain the namespace and instance
self.save_bt_addr(packet, bt_addr)
# properties holds the identifying information for a beacon
# e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon
properties = self.get_properties(packet, bt_addr)
if self.device_filter is None and self.packet_filter is None:
# no filters selected
self.callback(bt_addr, rssi, packet, properties)
elif self.device_filter is None:
# filter by packet type
if is_one_of(packet, self.packet_filter):
self.callback(bt_addr, rssi, packet, properties)
else:
# filter by device and packet type
if self.packet_filter and not is_one_of(packet, self.packet_filter):
# return if packet filter does not match
return
# iterate over filters and call .matches() on each
for filtr in self.device_filter:
if isinstance(filtr, BtAddrFilter):
if filtr.matches({'bt_addr':bt_addr}):
self.callback(bt_addr, rssi, packet, properties)
return
elif filtr.matches(properties):
self.callback(bt_addr, rssi, packet, properties)
return | Parse the packet and call callback if one of the filters matches. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L163-L213 | [
"def parse_packet(packet):\n \"\"\"Parse a beacon advertisement packet.\"\"\"\n frame = parse_ltv_packet(packet)\n if frame is None:\n frame = parse_ibeacon_packet(packet)\n return frame\n",
"def bt_addr_to_string(addr):\n \"\"\"Convert a binary string to the hex representation.\"\"\"\n addr_str = array.array('B', addr)\n addr_str.reverse()\n hex_str = hexlify(addr_str.tostring()).decode('ascii')\n # insert \":\" seperator between the bytes\n return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))\n",
"def is_one_of(obj, types):\n \"\"\"Return true iff obj is an instance of one of the types.\"\"\"\n for type_ in types:\n if isinstance(obj, type_):\n return True\n return False\n",
"def bin_to_int(string):\n \"\"\"Convert a one element byte string to signed int for python 2 support.\"\"\"\n if isinstance(string, str):\n return struct.unpack(\"b\", string)[0]\n else:\n return struct.unpack(\"b\", bytes([string]))[0]\n",
"def save_bt_addr(self, packet, bt_addr):\n \"\"\"Add to the list of mappings.\"\"\"\n if isinstance(packet, EddystoneUIDFrame):\n # remove out old mapping\n new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr]\n new_mappings.append((bt_addr, packet.properties))\n self.eddystone_mappings = new_mappings\n",
"def get_properties(self, packet, bt_addr):\n \"\"\"Get properties of beacon depending on type.\"\"\"\n if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \\\n EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):\n # here we retrieve the namespace and instance which corresponds to the\n # eddystone beacon with this bt address\n return self.properties_from_mapping(bt_addr)\n else:\n return packet.properties\n"
] | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
"""Construct interface object."""
# do import here so that the package can be used in parsing-only mode (no bluez required)
self.bluez = import_module('bluetooth._bluetooth')
threading.Thread.__init__(self)
self.daemon = False
self.keep_going = True
self.callback = callback
# number of the bt device (hciX)
self.bt_device_id = bt_device_id
# list of beacons to monitor
self.device_filter = device_filter
self.mode = get_mode(device_filter)
# list of packet types to monitor
self.packet_filter = packet_filter
# bluetooth socket
self.socket = None
# keep track of Eddystone Beacon <-> bt addr mapping
self.eddystone_mappings = []
def run(self):
"""Continously scan for BLE advertisements."""
self.socket = self.bluez.hci_open_dev(self.bt_device_id)
filtr = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(filtr)
self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
self.set_scan_parameters()
self.toggle_scan(True)
while self.keep_going:
pkt = self.socket.recv(255)
event = to_int(pkt[1])
subevent = to_int(pkt[3])
if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
# we have an BLE advertisement
self.process_packet(pkt)
self.socket.close()
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg)
def toggle_scan(self, enable, filter_duplicates=False):
"""Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets"""
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
def save_bt_addr(self, packet, bt_addr):
"""Add to the list of mappings."""
if isinstance(packet, EddystoneUIDFrame):
# remove out old mapping
new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr]
new_mappings.append((bt_addr, packet.properties))
self.eddystone_mappings = new_mappings
def get_properties(self, packet, bt_addr):
"""Get properties of beacon depending on type."""
if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
# here we retrieve the namespace and instance which corresponds to the
# eddystone beacon with this bt address
return self.properties_from_mapping(bt_addr)
else:
return packet.properties
def properties_from_mapping(self, bt_addr):
"""Retrieve properties (namespace, instance) for the specified bt address."""
for addr, properties in self.eddystone_mappings:
if addr == bt_addr:
return properties
return None
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join()
|
citruz/beacontools | beacontools/scanner.py | Monitor.save_bt_addr | python | def save_bt_addr(self, packet, bt_addr):
if isinstance(packet, EddystoneUIDFrame):
# remove out old mapping
new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr]
new_mappings.append((bt_addr, packet.properties))
self.eddystone_mappings = new_mappings | Add to the list of mappings. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L215-L221 | null | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
"""Construct interface object."""
# do import here so that the package can be used in parsing-only mode (no bluez required)
self.bluez = import_module('bluetooth._bluetooth')
threading.Thread.__init__(self)
self.daemon = False
self.keep_going = True
self.callback = callback
# number of the bt device (hciX)
self.bt_device_id = bt_device_id
# list of beacons to monitor
self.device_filter = device_filter
self.mode = get_mode(device_filter)
# list of packet types to monitor
self.packet_filter = packet_filter
# bluetooth socket
self.socket = None
# keep track of Eddystone Beacon <-> bt addr mapping
self.eddystone_mappings = []
def run(self):
"""Continously scan for BLE advertisements."""
self.socket = self.bluez.hci_open_dev(self.bt_device_id)
filtr = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(filtr)
self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
self.set_scan_parameters()
self.toggle_scan(True)
while self.keep_going:
pkt = self.socket.recv(255)
event = to_int(pkt[1])
subevent = to_int(pkt[3])
if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
# we have an BLE advertisement
self.process_packet(pkt)
self.socket.close()
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg)
def toggle_scan(self, enable, filter_duplicates=False):
"""Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets"""
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
def process_packet(self, pkt):
"""Parse the packet and call callback if one of the filters matches."""
# check if this could be a valid packet before parsing
# this reduces the CPU load significantly
if not ( \
((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \
((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \
((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))):
return
bt_addr = bt_addr_to_string(pkt[7:13])
rssi = bin_to_int(pkt[-1])
# strip bluetooth address and parse packet
packet = parse_packet(pkt[14:-1])
# return if packet was not an beacon advertisement
if not packet:
return
# we need to remeber which eddystone beacon has which bt address
# because the TLM and URL frames do not contain the namespace and instance
self.save_bt_addr(packet, bt_addr)
# properties holds the identifying information for a beacon
# e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon
properties = self.get_properties(packet, bt_addr)
if self.device_filter is None and self.packet_filter is None:
# no filters selected
self.callback(bt_addr, rssi, packet, properties)
elif self.device_filter is None:
# filter by packet type
if is_one_of(packet, self.packet_filter):
self.callback(bt_addr, rssi, packet, properties)
else:
# filter by device and packet type
if self.packet_filter and not is_one_of(packet, self.packet_filter):
# return if packet filter does not match
return
# iterate over filters and call .matches() on each
for filtr in self.device_filter:
if isinstance(filtr, BtAddrFilter):
if filtr.matches({'bt_addr':bt_addr}):
self.callback(bt_addr, rssi, packet, properties)
return
elif filtr.matches(properties):
self.callback(bt_addr, rssi, packet, properties)
return
def get_properties(self, packet, bt_addr):
"""Get properties of beacon depending on type."""
if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
# here we retrieve the namespace and instance which corresponds to the
# eddystone beacon with this bt address
return self.properties_from_mapping(bt_addr)
else:
return packet.properties
def properties_from_mapping(self, bt_addr):
"""Retrieve properties (namespace, instance) for the specified bt address."""
for addr, properties in self.eddystone_mappings:
if addr == bt_addr:
return properties
return None
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join()
|
citruz/beacontools | beacontools/scanner.py | Monitor.get_properties | python | def get_properties(self, packet, bt_addr):
if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
# here we retrieve the namespace and instance which corresponds to the
# eddystone beacon with this bt address
return self.properties_from_mapping(bt_addr)
else:
return packet.properties | Get properties of beacon depending on type. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L223-L231 | [
"def is_one_of(obj, types):\n \"\"\"Return true iff obj is an instance of one of the types.\"\"\"\n for type_ in types:\n if isinstance(obj, type_):\n return True\n return False\n",
"def properties_from_mapping(self, bt_addr):\n \"\"\"Retrieve properties (namespace, instance) for the specified bt address.\"\"\"\n for addr, properties in self.eddystone_mappings:\n if addr == bt_addr:\n return properties\n return None\n"
] | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
"""Construct interface object."""
# do import here so that the package can be used in parsing-only mode (no bluez required)
self.bluez = import_module('bluetooth._bluetooth')
threading.Thread.__init__(self)
self.daemon = False
self.keep_going = True
self.callback = callback
# number of the bt device (hciX)
self.bt_device_id = bt_device_id
# list of beacons to monitor
self.device_filter = device_filter
self.mode = get_mode(device_filter)
# list of packet types to monitor
self.packet_filter = packet_filter
# bluetooth socket
self.socket = None
# keep track of Eddystone Beacon <-> bt addr mapping
self.eddystone_mappings = []
def run(self):
"""Continously scan for BLE advertisements."""
self.socket = self.bluez.hci_open_dev(self.bt_device_id)
filtr = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(filtr)
self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
self.set_scan_parameters()
self.toggle_scan(True)
while self.keep_going:
pkt = self.socket.recv(255)
event = to_int(pkt[1])
subevent = to_int(pkt[3])
if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
# we have an BLE advertisement
self.process_packet(pkt)
self.socket.close()
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg)
def toggle_scan(self, enable, filter_duplicates=False):
"""Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets"""
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
def process_packet(self, pkt):
"""Parse the packet and call callback if one of the filters matches."""
# check if this could be a valid packet before parsing
# this reduces the CPU load significantly
if not ( \
((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \
((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \
((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))):
return
bt_addr = bt_addr_to_string(pkt[7:13])
rssi = bin_to_int(pkt[-1])
# strip bluetooth address and parse packet
packet = parse_packet(pkt[14:-1])
# return if packet was not an beacon advertisement
if not packet:
return
# we need to remeber which eddystone beacon has which bt address
# because the TLM and URL frames do not contain the namespace and instance
self.save_bt_addr(packet, bt_addr)
# properties holds the identifying information for a beacon
# e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon
properties = self.get_properties(packet, bt_addr)
if self.device_filter is None and self.packet_filter is None:
# no filters selected
self.callback(bt_addr, rssi, packet, properties)
elif self.device_filter is None:
# filter by packet type
if is_one_of(packet, self.packet_filter):
self.callback(bt_addr, rssi, packet, properties)
else:
# filter by device and packet type
if self.packet_filter and not is_one_of(packet, self.packet_filter):
# return if packet filter does not match
return
# iterate over filters and call .matches() on each
for filtr in self.device_filter:
if isinstance(filtr, BtAddrFilter):
if filtr.matches({'bt_addr':bt_addr}):
self.callback(bt_addr, rssi, packet, properties)
return
elif filtr.matches(properties):
self.callback(bt_addr, rssi, packet, properties)
return
def save_bt_addr(self, packet, bt_addr):
"""Add to the list of mappings."""
if isinstance(packet, EddystoneUIDFrame):
# remove out old mapping
new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr]
new_mappings.append((bt_addr, packet.properties))
self.eddystone_mappings = new_mappings
def properties_from_mapping(self, bt_addr):
"""Retrieve properties (namespace, instance) for the specified bt address."""
for addr, properties in self.eddystone_mappings:
if addr == bt_addr:
return properties
return None
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join()
|
citruz/beacontools | beacontools/scanner.py | Monitor.properties_from_mapping | python | def properties_from_mapping(self, bt_addr):
for addr, properties in self.eddystone_mappings:
if addr == bt_addr:
return properties
return None | Retrieve properties (namespace, instance) for the specified bt address. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L233-L238 | null | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
"""Construct interface object."""
# do import here so that the package can be used in parsing-only mode (no bluez required)
self.bluez = import_module('bluetooth._bluetooth')
threading.Thread.__init__(self)
self.daemon = False
self.keep_going = True
self.callback = callback
# number of the bt device (hciX)
self.bt_device_id = bt_device_id
# list of beacons to monitor
self.device_filter = device_filter
self.mode = get_mode(device_filter)
# list of packet types to monitor
self.packet_filter = packet_filter
# bluetooth socket
self.socket = None
# keep track of Eddystone Beacon <-> bt addr mapping
self.eddystone_mappings = []
def run(self):
"""Continously scan for BLE advertisements."""
self.socket = self.bluez.hci_open_dev(self.bt_device_id)
filtr = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(filtr)
self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
self.set_scan_parameters()
self.toggle_scan(True)
while self.keep_going:
pkt = self.socket.recv(255)
event = to_int(pkt[1])
subevent = to_int(pkt[3])
if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
# we have an BLE advertisement
self.process_packet(pkt)
self.socket.close()
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg)
def toggle_scan(self, enable, filter_duplicates=False):
"""Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets"""
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
def process_packet(self, pkt):
"""Parse the packet and call callback if one of the filters matches."""
# check if this could be a valid packet before parsing
# this reduces the CPU load significantly
if not ( \
((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \
((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \
((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))):
return
bt_addr = bt_addr_to_string(pkt[7:13])
rssi = bin_to_int(pkt[-1])
# strip bluetooth address and parse packet
packet = parse_packet(pkt[14:-1])
# return if packet was not an beacon advertisement
if not packet:
return
# we need to remeber which eddystone beacon has which bt address
# because the TLM and URL frames do not contain the namespace and instance
self.save_bt_addr(packet, bt_addr)
# properties holds the identifying information for a beacon
# e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon
properties = self.get_properties(packet, bt_addr)
if self.device_filter is None and self.packet_filter is None:
# no filters selected
self.callback(bt_addr, rssi, packet, properties)
elif self.device_filter is None:
# filter by packet type
if is_one_of(packet, self.packet_filter):
self.callback(bt_addr, rssi, packet, properties)
else:
# filter by device and packet type
if self.packet_filter and not is_one_of(packet, self.packet_filter):
# return if packet filter does not match
return
# iterate over filters and call .matches() on each
for filtr in self.device_filter:
if isinstance(filtr, BtAddrFilter):
if filtr.matches({'bt_addr':bt_addr}):
self.callback(bt_addr, rssi, packet, properties)
return
elif filtr.matches(properties):
self.callback(bt_addr, rssi, packet, properties)
return
def save_bt_addr(self, packet, bt_addr):
"""Add to the list of mappings."""
if isinstance(packet, EddystoneUIDFrame):
# remove out old mapping
new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr]
new_mappings.append((bt_addr, packet.properties))
self.eddystone_mappings = new_mappings
def get_properties(self, packet, bt_addr):
"""Get properties of beacon depending on type."""
if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
# here we retrieve the namespace and instance which corresponds to the
# eddystone beacon with this bt address
return self.properties_from_mapping(bt_addr)
else:
return packet.properties
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join()
|
citruz/beacontools | beacontools/scanner.py | Monitor.terminate | python | def terminate(self):
self.toggle_scan(False)
self.keep_going = False
self.join() | Signal runner to stop and join thread. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L240-L244 | [
"def toggle_scan(self, enable, filter_duplicates=False):\n \"\"\"Enables or disables BLE scanning\n\n Args:\n enable: boolean value to enable (True) or disable (False) scanner\n filter_duplicates: boolean value to enable/disable filter, that\n omits duplicated packets\"\"\"\n command = struct.pack(\">BB\", enable, filter_duplicates)\n self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)\n"
] | class Monitor(threading.Thread):
"""Continously scan for BLE advertisements."""
def __init__(self, callback, bt_device_id, device_filter, packet_filter):
"""Construct interface object."""
# do import here so that the package can be used in parsing-only mode (no bluez required)
self.bluez = import_module('bluetooth._bluetooth')
threading.Thread.__init__(self)
self.daemon = False
self.keep_going = True
self.callback = callback
# number of the bt device (hciX)
self.bt_device_id = bt_device_id
# list of beacons to monitor
self.device_filter = device_filter
self.mode = get_mode(device_filter)
# list of packet types to monitor
self.packet_filter = packet_filter
# bluetooth socket
self.socket = None
# keep track of Eddystone Beacon <-> bt addr mapping
self.eddystone_mappings = []
def run(self):
"""Continously scan for BLE advertisements."""
self.socket = self.bluez.hci_open_dev(self.bt_device_id)
filtr = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(filtr)
self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT)
self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr)
self.set_scan_parameters()
self.toggle_scan(True)
while self.keep_going:
pkt = self.socket.recv(255)
event = to_int(pkt[1])
subevent = to_int(pkt[3])
if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT:
# we have an BLE advertisement
self.process_packet(pkt)
self.socket.close()
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
""""sets the le scan parameters
Args:
scan_type: ScanType.(PASSIVE|ACTIVE)
interval: ms (as float) between scans (valid range 2.5ms - 10240ms)
..note:: when interval and window are equal, the scan
runs continuos
window: ms (as float) scan duration (valid range 2.5ms - 10240ms)
address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)
* PUBLIC = use device MAC address
* RANDOM = generate a random MAC address and use that
filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will
return all fetched bluetooth packets (WHITELIST_ONLY is not supported,
because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)
Raises:
ValueError: A value had an unexpected format or was not in range
"""
interval_fractions = interval_ms / MS_FRACTION_DIVIDER
if interval_fractions < 0x0004 or interval_fractions > 0x4000:
raise ValueError(
"Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
interval_fractions))
window_fractions = window_ms / MS_FRACTION_DIVIDER
if window_fractions < 0x0004 or window_fractions > 0x4000:
raise ValueError(
"Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
window_fractions))
interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
scan_parameter_pkg = struct.pack(
">BHHBB",
scan_type,
interval_fractions,
window_fractions,
address_type,
filter_type)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
scan_parameter_pkg)
def toggle_scan(self, enable, filter_duplicates=False):
    """Enable or disable BLE scanning via an HCI command.

    Args:
        enable: boolean value to enable (True) or disable (False) scanner
        filter_duplicates: boolean value to enable/disable filter, that
            omits duplicated packets
    """
    # Both flags are sent as single bytes in the LE_Set_Scan_Enable payload.
    command = struct.pack(">BB", enable, filter_duplicates)
    self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
def process_packet(self, pkt):
    """Parse the packet and call callback if one of the filters matches.

    Args:
        pkt: raw HCI advertising-report packet bytes as read from the socket
    """
    # check if this could be a valid packet before parsing
    # this reduces the CPU load significantly
    if not ( \
            ((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \
            ((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \
            ((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))):
        return

    bt_addr = bt_addr_to_string(pkt[7:13])
    # last byte of the report is the signed RSSI value
    rssi = bin_to_int(pkt[-1])
    # strip bluetooth address and parse packet
    packet = parse_packet(pkt[14:-1])

    # return if packet was not a beacon advertisement
    if not packet:
        return

    # we need to remember which eddystone beacon has which bt address
    # because the TLM and URL frames do not contain the namespace and instance
    self.save_bt_addr(packet, bt_addr)
    # properties holds the identifying information for a beacon
    # e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon
    properties = self.get_properties(packet, bt_addr)

    if self.device_filter is None and self.packet_filter is None:
        # no filters selected
        self.callback(bt_addr, rssi, packet, properties)

    elif self.device_filter is None:
        # filter by packet type
        if is_one_of(packet, self.packet_filter):
            self.callback(bt_addr, rssi, packet, properties)
    else:
        # filter by device and packet type
        if self.packet_filter and not is_one_of(packet, self.packet_filter):
            # return if packet filter does not match
            return

        # iterate over filters and call .matches() on each
        for filtr in self.device_filter:
            if isinstance(filtr, BtAddrFilter):
                # address filters match on the sender address, not the
                # beacon's own identifying properties
                if filtr.matches({'bt_addr':bt_addr}):
                    self.callback(bt_addr, rssi, packet, properties)
                    return
            elif filtr.matches(properties):
                self.callback(bt_addr, rssi, packet, properties)
                return
def save_bt_addr(self, packet, bt_addr):
    """Remember which bluetooth address belongs to which Eddystone UID beacon."""
    if not isinstance(packet, EddystoneUIDFrame):
        return
    # drop any stale mapping for this address before storing the fresh one
    updated = [entry for entry in self.eddystone_mappings if entry[0] != bt_addr]
    updated.append((bt_addr, packet.properties))
    self.eddystone_mappings = updated
def get_properties(self, packet, bt_addr):
    """Get properties of beacon depending on type.

    Args:
        packet: parsed beacon frame object
        bt_addr: sender's bluetooth address string

    Returns:
        Identifying properties for the beacon; for TLM/URL/EID frames this
        is looked up from the saved mapping and may be None if unknown.
    """
    if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
                          EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
        # here we retrieve the namespace and instance which corresponds to the
        # eddystone beacon with this bt address (these frame types do not
        # carry the identifiers themselves)
        return self.properties_from_mapping(bt_addr)
    else:
        return packet.properties
def properties_from_mapping(self, bt_addr):
    """Retrieve properties (namespace, instance) for the specified bt address."""
    # linear scan over the remembered (address, properties) pairs
    hits = (props for addr, props in self.eddystone_mappings if addr == bt_addr)
    return next(hits, None)
|
citruz/beacontools | beacontools/utils.py | data_to_uuid | python | def data_to_uuid(data):
string = data_to_hexstring(data)
return string[0:8]+'-'+string[8:12]+'-'+string[12:16]+'-'+string[16:20]+'-'+string[20:32] | Convert an array of binary data to the iBeacon uuid format. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L24-L27 | [
"def data_to_hexstring(data):\n \"\"\"Convert an array of binary data to the hex representation as a string.\"\"\"\n return hexlify(data_to_binstring(data)).decode('ascii')\n"
] | """Utilities for byte conversion."""
from binascii import hexlify
from re import compile as compile_regex
import array
import struct
from .const import ScannerMode
# compiled regex to match lowercase MAC-addresses coming from
# bt_addr_to_string
RE_MAC_ADDR = compile_regex('(?:[0-9a-f]{2}:){5}(?:[0-9a-f]{2})')
def is_valid_mac(mac):
""""Returns True if the given argument matches RE_MAC_ADDR, otherwise False"""
return RE_MAC_ADDR.match(mac) is not None
def data_to_hexstring(data):
"""Convert an array of binary data to the hex representation as a string."""
return hexlify(data_to_binstring(data)).decode('ascii')
def data_to_binstring(data):
"""Convert an array of binary data to a binary string."""
return array.array('B', data).tostring()
def bt_addr_to_string(addr):
"""Convert a binary string to the hex representation."""
addr_str = array.array('B', addr)
addr_str.reverse()
hex_str = hexlify(addr_str.tostring()).decode('ascii')
# insert ":" seperator between the bytes
return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))
def is_one_of(obj, types):
"""Return true iff obj is an instance of one of the types."""
for type_ in types:
if isinstance(obj, type_):
return True
return False
def is_packet_type(cls):
"""Check if class is one the packet types."""
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \
EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
return (cls in [EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB])
def to_int(string):
"""Convert a one element byte string to int for python 2 support."""
if isinstance(string, str):
return ord(string[0])
else:
return string
def bin_to_int(string):
"""Convert a one element byte string to signed int for python 2 support."""
if isinstance(string, str):
return struct.unpack("b", string)[0]
else:
return struct.unpack("b", bytes([string]))[0]
def get_mode(device_filter):
"""Determine which beacons the scanner should look for."""
from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter
if device_filter is None or len(device_filter) == 0:
return ScannerMode.MODE_ALL
mode = ScannerMode.MODE_NONE
for filtr in device_filter:
if isinstance(filtr, IBeaconFilter):
mode |= ScannerMode.MODE_IBEACON
elif isinstance(filtr, EddystoneFilter):
mode |= ScannerMode.MODE_EDDYSTONE
elif isinstance(filtr, EstimoteFilter):
mode |= ScannerMode.MODE_ESTIMOTE
elif isinstance(filtr, BtAddrFilter):
mode |= ScannerMode.MODE_ALL
break
return mode
|
citruz/beacontools | beacontools/utils.py | bt_addr_to_string | python | def bt_addr_to_string(addr):
addr_str = array.array('B', addr)
addr_str.reverse()
hex_str = hexlify(addr_str.tostring()).decode('ascii')
# insert ":" seperator between the bytes
return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2])) | Convert a binary string to the hex representation. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L35-L41 | null | """Utilities for byte conversion."""
from binascii import hexlify
from re import compile as compile_regex
import array
import struct
from .const import ScannerMode
# compiled regex to match lowercase MAC-addresses coming from
# bt_addr_to_string
RE_MAC_ADDR = compile_regex('(?:[0-9a-f]{2}:){5}(?:[0-9a-f]{2})')
def is_valid_mac(mac):
    """Return True if *mac* matches RE_MAC_ADDR (lowercase colon-separated MAC)."""
    return bool(RE_MAC_ADDR.match(mac))
def data_to_hexstring(data):
    """Return the hex-string representation of an array of binary data."""
    binstring = data_to_binstring(data)
    return hexlify(binstring).decode('ascii')
def data_to_uuid(data):
    """Format an array of binary data as an iBeacon UUID string (8-4-4-4-12)."""
    hexstr = data_to_hexstring(data)
    groups = (hexstr[0:8], hexstr[8:12], hexstr[12:16], hexstr[16:20], hexstr[20:32])
    return '-'.join(groups)
def data_to_binstring(data):
    """Convert an array of binary data to a binary string.

    Args:
        data: iterable of integers in range 0-255 (e.g. bytes, list of ints)

    Returns:
        bytes object containing the same byte values
    """
    # array.tostring() was deprecated since Python 3.2 and removed in 3.9;
    # tobytes() is the exact drop-in replacement.
    return array.array('B', data).tobytes()
def is_one_of(obj, types):
"""Return true iff obj is an instance of one of the types."""
for type_ in types:
if isinstance(obj, type_):
return True
return False
def is_packet_type(cls):
"""Check if class is one the packet types."""
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \
EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
return (cls in [EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB])
def to_int(string):
"""Convert a one element byte string to int for python 2 support."""
if isinstance(string, str):
return ord(string[0])
else:
return string
def bin_to_int(string):
"""Convert a one element byte string to signed int for python 2 support."""
if isinstance(string, str):
return struct.unpack("b", string)[0]
else:
return struct.unpack("b", bytes([string]))[0]
def get_mode(device_filter):
"""Determine which beacons the scanner should look for."""
from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter
if device_filter is None or len(device_filter) == 0:
return ScannerMode.MODE_ALL
mode = ScannerMode.MODE_NONE
for filtr in device_filter:
if isinstance(filtr, IBeaconFilter):
mode |= ScannerMode.MODE_IBEACON
elif isinstance(filtr, EddystoneFilter):
mode |= ScannerMode.MODE_EDDYSTONE
elif isinstance(filtr, EstimoteFilter):
mode |= ScannerMode.MODE_ESTIMOTE
elif isinstance(filtr, BtAddrFilter):
mode |= ScannerMode.MODE_ALL
break
return mode
|
citruz/beacontools | beacontools/utils.py | is_one_of | python | def is_one_of(obj, types):
for type_ in types:
if isinstance(obj, type_):
return True
return False | Return true iff obj is an instance of one of the types. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L44-L49 | null | """Utilities for byte conversion."""
from binascii import hexlify
from re import compile as compile_regex
import array
import struct
from .const import ScannerMode
# compiled regex to match lowercase MAC-addresses coming from
# bt_addr_to_string
RE_MAC_ADDR = compile_regex('(?:[0-9a-f]{2}:){5}(?:[0-9a-f]{2})')
def is_valid_mac(mac):
""""Returns True if the given argument matches RE_MAC_ADDR, otherwise False"""
return RE_MAC_ADDR.match(mac) is not None
def data_to_hexstring(data):
"""Convert an array of binary data to the hex representation as a string."""
return hexlify(data_to_binstring(data)).decode('ascii')
def data_to_uuid(data):
"""Convert an array of binary data to the iBeacon uuid format."""
string = data_to_hexstring(data)
return string[0:8]+'-'+string[8:12]+'-'+string[12:16]+'-'+string[16:20]+'-'+string[20:32]
def data_to_binstring(data):
"""Convert an array of binary data to a binary string."""
return array.array('B', data).tostring()
def bt_addr_to_string(addr):
    """Convert a little-endian binary bluetooth address to its hex string form.

    Args:
        addr: 6-byte binary address, least-significant byte first

    Returns:
        lowercase colon-separated string, e.g. "06:05:04:03:02:01"
    """
    addr_str = array.array('B', addr)
    addr_str.reverse()
    # array.tostring() was removed in Python 3.9; tobytes() is the replacement.
    hex_str = hexlify(addr_str.tobytes()).decode('ascii')
    # insert ":" separator between the bytes
    return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))
def is_packet_type(cls):
"""Check if class is one the packet types."""
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \
EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
return (cls in [EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB])
def to_int(string):
"""Convert a one element byte string to int for python 2 support."""
if isinstance(string, str):
return ord(string[0])
else:
return string
def bin_to_int(string):
"""Convert a one element byte string to signed int for python 2 support."""
if isinstance(string, str):
return struct.unpack("b", string)[0]
else:
return struct.unpack("b", bytes([string]))[0]
def get_mode(device_filter):
"""Determine which beacons the scanner should look for."""
from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter
if device_filter is None or len(device_filter) == 0:
return ScannerMode.MODE_ALL
mode = ScannerMode.MODE_NONE
for filtr in device_filter:
if isinstance(filtr, IBeaconFilter):
mode |= ScannerMode.MODE_IBEACON
elif isinstance(filtr, EddystoneFilter):
mode |= ScannerMode.MODE_EDDYSTONE
elif isinstance(filtr, EstimoteFilter):
mode |= ScannerMode.MODE_ESTIMOTE
elif isinstance(filtr, BtAddrFilter):
mode |= ScannerMode.MODE_ALL
break
return mode
|
citruz/beacontools | beacontools/utils.py | is_packet_type | python | def is_packet_type(cls):
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \
EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
return (cls in [EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB]) | Check if class is one the packet types. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L52-L60 | null | """Utilities for byte conversion."""
from binascii import hexlify
from re import compile as compile_regex
import array
import struct
from .const import ScannerMode
# compiled regex to match lowercase MAC-addresses coming from
# bt_addr_to_string
RE_MAC_ADDR = compile_regex('(?:[0-9a-f]{2}:){5}(?:[0-9a-f]{2})')
def is_valid_mac(mac):
""""Returns True if the given argument matches RE_MAC_ADDR, otherwise False"""
return RE_MAC_ADDR.match(mac) is not None
def data_to_hexstring(data):
"""Convert an array of binary data to the hex representation as a string."""
return hexlify(data_to_binstring(data)).decode('ascii')
def data_to_uuid(data):
"""Convert an array of binary data to the iBeacon uuid format."""
string = data_to_hexstring(data)
return string[0:8]+'-'+string[8:12]+'-'+string[12:16]+'-'+string[16:20]+'-'+string[20:32]
def data_to_binstring(data):
"""Convert an array of binary data to a binary string."""
return array.array('B', data).tostring()
def bt_addr_to_string(addr):
"""Convert a binary string to the hex representation."""
addr_str = array.array('B', addr)
addr_str.reverse()
hex_str = hexlify(addr_str.tostring()).decode('ascii')
# insert ":" seperator between the bytes
return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))
def is_one_of(obj, types):
    """Return True iff obj is an instance of at least one of the given types."""
    return any(isinstance(obj, candidate) for candidate in types)
def to_int(string):
    """Return the code point of a one-character string, or pass ints through.

    Exists for python 2 support, where indexing bytes yields a str.
    """
    return ord(string[0]) if isinstance(string, str) else string
def bin_to_int(string):
    """Interpret a single byte as a signed 8-bit integer (python 2 compat)."""
    # On python 2 a bytes index yields str; on python 3 it yields int.
    raw = string if isinstance(string, str) else bytes([string])
    return struct.unpack("b", raw)[0]
def get_mode(device_filter):
    """Determine which beacons the scanner should look for.

    Args:
        device_filter: list of filter objects, or None

    Returns:
        ScannerMode bitmask covering the beacon types the filters require.
    """
    # imported locally to avoid a circular import with device_filters
    from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter
    if device_filter is None or len(device_filter) == 0:
        # no filters: scan for everything
        return ScannerMode.MODE_ALL

    mode = ScannerMode.MODE_NONE
    for filtr in device_filter:
        if isinstance(filtr, IBeaconFilter):
            mode |= ScannerMode.MODE_IBEACON
        elif isinstance(filtr, EddystoneFilter):
            mode |= ScannerMode.MODE_EDDYSTONE
        elif isinstance(filtr, EstimoteFilter):
            mode |= ScannerMode.MODE_ESTIMOTE
        elif isinstance(filtr, BtAddrFilter):
            # an address filter can match any beacon type, so scan for all
            # and stop looking at the remaining filters
            mode |= ScannerMode.MODE_ALL
            break

    return mode
|
citruz/beacontools | beacontools/utils.py | bin_to_int | python | def bin_to_int(string):
if isinstance(string, str):
return struct.unpack("b", string)[0]
else:
return struct.unpack("b", bytes([string]))[0] | Convert a one element byte string to signed int for python 2 support. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L71-L76 | null | """Utilities for byte conversion."""
from binascii import hexlify
from re import compile as compile_regex
import array
import struct
from .const import ScannerMode
# compiled regex to match lowercase MAC-addresses coming from
# bt_addr_to_string
RE_MAC_ADDR = compile_regex('(?:[0-9a-f]{2}:){5}(?:[0-9a-f]{2})')
def is_valid_mac(mac):
""""Returns True if the given argument matches RE_MAC_ADDR, otherwise False"""
return RE_MAC_ADDR.match(mac) is not None
def data_to_hexstring(data):
"""Convert an array of binary data to the hex representation as a string."""
return hexlify(data_to_binstring(data)).decode('ascii')
def data_to_uuid(data):
"""Convert an array of binary data to the iBeacon uuid format."""
string = data_to_hexstring(data)
return string[0:8]+'-'+string[8:12]+'-'+string[12:16]+'-'+string[16:20]+'-'+string[20:32]
def data_to_binstring(data):
"""Convert an array of binary data to a binary string."""
return array.array('B', data).tostring()
def bt_addr_to_string(addr):
"""Convert a binary string to the hex representation."""
addr_str = array.array('B', addr)
addr_str.reverse()
hex_str = hexlify(addr_str.tostring()).decode('ascii')
# insert ":" seperator between the bytes
return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))
def is_one_of(obj, types):
"""Return true iff obj is an instance of one of the types."""
for type_ in types:
if isinstance(obj, type_):
return True
return False
def is_packet_type(cls):
    """Return True if *cls* is one of the known beacon packet classes."""
    # imported locally to avoid a circular import with packet_types
    from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \
                              EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \
                              EddystoneEIDFrame, IBeaconAdvertisement, \
                              EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
    known = (EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame,
             EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement,
             EstimoteTelemetryFrameA, EstimoteTelemetryFrameB)
    return cls in known
def to_int(string):
"""Convert a one element byte string to int for python 2 support."""
if isinstance(string, str):
return ord(string[0])
else:
return string
def get_mode(device_filter):
"""Determine which beacons the scanner should look for."""
from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter
if device_filter is None or len(device_filter) == 0:
return ScannerMode.MODE_ALL
mode = ScannerMode.MODE_NONE
for filtr in device_filter:
if isinstance(filtr, IBeaconFilter):
mode |= ScannerMode.MODE_IBEACON
elif isinstance(filtr, EddystoneFilter):
mode |= ScannerMode.MODE_EDDYSTONE
elif isinstance(filtr, EstimoteFilter):
mode |= ScannerMode.MODE_ESTIMOTE
elif isinstance(filtr, BtAddrFilter):
mode |= ScannerMode.MODE_ALL
break
return mode
|
citruz/beacontools | beacontools/utils.py | get_mode | python | def get_mode(device_filter):
from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter
if device_filter is None or len(device_filter) == 0:
return ScannerMode.MODE_ALL
mode = ScannerMode.MODE_NONE
for filtr in device_filter:
if isinstance(filtr, IBeaconFilter):
mode |= ScannerMode.MODE_IBEACON
elif isinstance(filtr, EddystoneFilter):
mode |= ScannerMode.MODE_EDDYSTONE
elif isinstance(filtr, EstimoteFilter):
mode |= ScannerMode.MODE_ESTIMOTE
elif isinstance(filtr, BtAddrFilter):
mode |= ScannerMode.MODE_ALL
break
return mode | Determine which beacons the scanner should look for. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L79-L97 | null | """Utilities for byte conversion."""
from binascii import hexlify
from re import compile as compile_regex
import array
import struct
from .const import ScannerMode
# compiled regex to match lowercase MAC-addresses coming from
# bt_addr_to_string
RE_MAC_ADDR = compile_regex('(?:[0-9a-f]{2}:){5}(?:[0-9a-f]{2})')
def is_valid_mac(mac):
""""Returns True if the given argument matches RE_MAC_ADDR, otherwise False"""
return RE_MAC_ADDR.match(mac) is not None
def data_to_hexstring(data):
"""Convert an array of binary data to the hex representation as a string."""
return hexlify(data_to_binstring(data)).decode('ascii')
def data_to_uuid(data):
"""Convert an array of binary data to the iBeacon uuid format."""
string = data_to_hexstring(data)
return string[0:8]+'-'+string[8:12]+'-'+string[12:16]+'-'+string[16:20]+'-'+string[20:32]
def data_to_binstring(data):
"""Convert an array of binary data to a binary string."""
return array.array('B', data).tostring()
def bt_addr_to_string(addr):
"""Convert a binary string to the hex representation."""
addr_str = array.array('B', addr)
addr_str.reverse()
hex_str = hexlify(addr_str.tostring()).decode('ascii')
# insert ":" seperator between the bytes
return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))
def is_one_of(obj, types):
"""Return true iff obj is an instance of one of the types."""
for type_ in types:
if isinstance(obj, type_):
return True
return False
def is_packet_type(cls):
"""Check if class is one the packet types."""
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \
EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \
EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
return (cls in [EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB])
def to_int(string):
"""Convert a one element byte string to int for python 2 support."""
if isinstance(string, str):
return ord(string[0])
else:
return string
def bin_to_int(string):
"""Convert a one element byte string to signed int for python 2 support."""
if isinstance(string, str):
return struct.unpack("b", string)[0]
else:
return struct.unpack("b", bytes([string]))[0]
|
citruz/beacontools | beacontools/device_filters.py | DeviceFilter.matches | python | def matches(self, filter_props):
if filter_props is None:
return False
found_one = False
for key, value in filter_props.items():
if key in self.properties and value != self.properties[key]:
return False
elif key in self.properties and value == self.properties[key]:
found_one = True
return found_one | Check if the filter matches the supplied properties. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/device_filters.py#L13-L25 | null | class DeviceFilter(object):
"""Base class for all device filters. Should not be used by itself."""
def __init__(self):
    """Initialize an empty filter."""
    # maps property name -> required value; subclasses populate this
    self.properties = {}
def __repr__(self):
    """Return a debug representation listing the filter's properties."""
    pairs = ("=".join((key, str(value),)) for key, value in self.properties.items())
    return "{}({})".format(self.__class__.__name__, ", ".join(pairs))
|
citruz/beacontools | beacontools/parser.py | parse_packet | python | def parse_packet(packet):
frame = parse_ltv_packet(packet)
if frame is None:
frame = parse_ibeacon_packet(packet)
return frame | Parse a beacon advertisement packet. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L14-L19 | [
"def parse_ltv_packet(packet):\n \"\"\"Parse a tag-length-value style beacon packet.\"\"\"\n try:\n frame = LTVFrame.parse(packet)\n for ltv in frame:\n if ltv['type'] == SERVICE_DATA_TYPE:\n data = ltv['value']\n\n if data[\"service_identifier\"] == EDDYSTONE_UUID:\n return parse_eddystone_service_data(data)\n\n elif data[\"service_identifier\"] == ESTIMOTE_UUID:\n return parse_estimote_service_data(data)\n\n except ConstructError:\n return None\n\n return None\n",
"def parse_ibeacon_packet(packet):\n \"\"\"Parse an ibeacon beacon advertisement packet.\"\"\"\n try:\n pkt = IBeaconAdvertisingPacket.parse(packet)\n return IBeaconAdvertisement(pkt)\n\n except ConstructError:\n return None\n"
] | """Beacon advertisement parser."""
from construct import ConstructError
from .structs import LTVFrame, IBeaconAdvertisingPacket
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
from .const import EDDYSTONE_TLM_UNENCRYPTED, EDDYSTONE_TLM_ENCRYPTED, SERVICE_DATA_TYPE, \
EDDYSTONE_UID_FRAME, EDDYSTONE_TLM_FRAME, EDDYSTONE_URL_FRAME, \
EDDYSTONE_EID_FRAME, EDDYSTONE_UUID, ESTIMOTE_UUID, ESTIMOTE_TELEMETRY_FRAME, \
ESTIMOTE_TELEMETRY_SUBFRAME_A, ESTIMOTE_TELEMETRY_SUBFRAME_B
def parse_ltv_packet(packet):
"""Parse a tag-length-value style beacon packet."""
try:
frame = LTVFrame.parse(packet)
for ltv in frame:
if ltv['type'] == SERVICE_DATA_TYPE:
data = ltv['value']
if data["service_identifier"] == EDDYSTONE_UUID:
return parse_eddystone_service_data(data)
elif data["service_identifier"] == ESTIMOTE_UUID:
return parse_estimote_service_data(data)
except ConstructError:
return None
return None
def parse_eddystone_service_data(data):
"""Parse Eddystone service data."""
if data['frame_type'] == EDDYSTONE_UID_FRAME:
return EddystoneUIDFrame(data['frame'])
elif data['frame_type'] == EDDYSTONE_TLM_FRAME:
if data['frame']['tlm_version'] == EDDYSTONE_TLM_ENCRYPTED:
return EddystoneEncryptedTLMFrame(data['frame']['data'])
elif data['frame']['tlm_version'] == EDDYSTONE_TLM_UNENCRYPTED:
return EddystoneTLMFrame(data['frame']['data'])
elif data['frame_type'] == EDDYSTONE_URL_FRAME:
return EddystoneURLFrame(data['frame'])
elif data['frame_type'] == EDDYSTONE_EID_FRAME:
return EddystoneEIDFrame(data['frame'])
else:
return None
def parse_estimote_service_data(data):
"""Parse Estimote service data."""
if data['frame_type'] & 0xF == ESTIMOTE_TELEMETRY_FRAME:
protocol_version = (data['frame_type'] & 0xF0) >> 4
if data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_A:
return EstimoteTelemetryFrameA(data['frame'], protocol_version)
elif data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_B:
return EstimoteTelemetryFrameB(data['frame'], protocol_version)
return None
def parse_ibeacon_packet(packet):
"""Parse an ibeacon beacon advertisement packet."""
try:
pkt = IBeaconAdvertisingPacket.parse(packet)
return IBeaconAdvertisement(pkt)
except ConstructError:
return None
|
citruz/beacontools | beacontools/parser.py | parse_ltv_packet | python | def parse_ltv_packet(packet):
try:
frame = LTVFrame.parse(packet)
for ltv in frame:
if ltv['type'] == SERVICE_DATA_TYPE:
data = ltv['value']
if data["service_identifier"] == EDDYSTONE_UUID:
return parse_eddystone_service_data(data)
elif data["service_identifier"] == ESTIMOTE_UUID:
return parse_estimote_service_data(data)
except ConstructError:
return None
return None | Parse a tag-length-value style beacon packet. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L21-L38 | [
"def parse_eddystone_service_data(data):\n \"\"\"Parse Eddystone service data.\"\"\"\n if data['frame_type'] == EDDYSTONE_UID_FRAME:\n return EddystoneUIDFrame(data['frame'])\n\n elif data['frame_type'] == EDDYSTONE_TLM_FRAME:\n if data['frame']['tlm_version'] == EDDYSTONE_TLM_ENCRYPTED:\n return EddystoneEncryptedTLMFrame(data['frame']['data'])\n elif data['frame']['tlm_version'] == EDDYSTONE_TLM_UNENCRYPTED:\n return EddystoneTLMFrame(data['frame']['data'])\n\n elif data['frame_type'] == EDDYSTONE_URL_FRAME:\n return EddystoneURLFrame(data['frame'])\n\n elif data['frame_type'] == EDDYSTONE_EID_FRAME:\n return EddystoneEIDFrame(data['frame'])\n else:\n return None\n",
"def parse_estimote_service_data(data):\n \"\"\"Parse Estimote service data.\"\"\"\n if data['frame_type'] & 0xF == ESTIMOTE_TELEMETRY_FRAME:\n protocol_version = (data['frame_type'] & 0xF0) >> 4\n if data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_A:\n return EstimoteTelemetryFrameA(data['frame'], protocol_version)\n elif data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_B:\n return EstimoteTelemetryFrameB(data['frame'], protocol_version)\n return None\n"
] | """Beacon advertisement parser."""
from construct import ConstructError
from .structs import LTVFrame, IBeaconAdvertisingPacket
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
from .const import EDDYSTONE_TLM_UNENCRYPTED, EDDYSTONE_TLM_ENCRYPTED, SERVICE_DATA_TYPE, \
EDDYSTONE_UID_FRAME, EDDYSTONE_TLM_FRAME, EDDYSTONE_URL_FRAME, \
EDDYSTONE_EID_FRAME, EDDYSTONE_UUID, ESTIMOTE_UUID, ESTIMOTE_TELEMETRY_FRAME, \
ESTIMOTE_TELEMETRY_SUBFRAME_A, ESTIMOTE_TELEMETRY_SUBFRAME_B
def parse_packet(packet):
    """Parse a beacon advertisement packet."""
    # try the tag-length-value formats (Eddystone/Estimote) first
    parsed = parse_ltv_packet(packet)
    if parsed is not None:
        return parsed
    # fall back to the fixed-layout iBeacon format
    return parse_ibeacon_packet(packet)
def parse_eddystone_service_data(data):
    """Parse Eddystone service data.

    Args:
        data: parsed service-data container with 'frame_type' and 'frame'

    Returns:
        The matching Eddystone frame object, or None for unknown frame
        types (note: an unknown tlm_version also falls through to None).
    """
    if data['frame_type'] == EDDYSTONE_UID_FRAME:
        return EddystoneUIDFrame(data['frame'])

    elif data['frame_type'] == EDDYSTONE_TLM_FRAME:
        if data['frame']['tlm_version'] == EDDYSTONE_TLM_ENCRYPTED:
            return EddystoneEncryptedTLMFrame(data['frame']['data'])
        elif data['frame']['tlm_version'] == EDDYSTONE_TLM_UNENCRYPTED:
            return EddystoneTLMFrame(data['frame']['data'])

    elif data['frame_type'] == EDDYSTONE_URL_FRAME:
        return EddystoneURLFrame(data['frame'])

    elif data['frame_type'] == EDDYSTONE_EID_FRAME:
        return EddystoneEIDFrame(data['frame'])
    else:
        return None
def parse_estimote_service_data(data):
    """Parse Estimote service data.

    Args:
        data: parsed service-data container with 'frame_type' and 'frame'

    Returns:
        An EstimoteTelemetryFrameA/B object, or None if the frame is not
        a telemetry frame or has an unknown subframe type.
    """
    if data['frame_type'] & 0xF == ESTIMOTE_TELEMETRY_FRAME:
        # upper nibble of frame_type carries the protocol version
        protocol_version = (data['frame_type'] & 0xF0) >> 4
        if data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_A:
            return EstimoteTelemetryFrameA(data['frame'], protocol_version)
        elif data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_B:
            return EstimoteTelemetryFrameB(data['frame'], protocol_version)
    return None
def parse_ibeacon_packet(packet):
    """Parse an ibeacon beacon advertisement packet.

    Args:
        packet: raw advertisement bytes

    Returns:
        IBeaconAdvertisement on success, or None if the bytes do not
        match the iBeacon layout (ConstructError is swallowed).
    """
    try:
        pkt = IBeaconAdvertisingPacket.parse(packet)
        return IBeaconAdvertisement(pkt)

    except ConstructError:
        return None
|
citruz/beacontools | beacontools/parser.py | parse_eddystone_service_data | python | def parse_eddystone_service_data(data):
if data['frame_type'] == EDDYSTONE_UID_FRAME:
return EddystoneUIDFrame(data['frame'])
elif data['frame_type'] == EDDYSTONE_TLM_FRAME:
if data['frame']['tlm_version'] == EDDYSTONE_TLM_ENCRYPTED:
return EddystoneEncryptedTLMFrame(data['frame']['data'])
elif data['frame']['tlm_version'] == EDDYSTONE_TLM_UNENCRYPTED:
return EddystoneTLMFrame(data['frame']['data'])
elif data['frame_type'] == EDDYSTONE_URL_FRAME:
return EddystoneURLFrame(data['frame'])
elif data['frame_type'] == EDDYSTONE_EID_FRAME:
return EddystoneEIDFrame(data['frame'])
else:
return None | Parse Eddystone service data. | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L40-L57 | null | """Beacon advertisement parser."""
from construct import ConstructError
from .structs import LTVFrame, IBeaconAdvertisingPacket
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
from .const import EDDYSTONE_TLM_UNENCRYPTED, EDDYSTONE_TLM_ENCRYPTED, SERVICE_DATA_TYPE, \
EDDYSTONE_UID_FRAME, EDDYSTONE_TLM_FRAME, EDDYSTONE_URL_FRAME, \
EDDYSTONE_EID_FRAME, EDDYSTONE_UUID, ESTIMOTE_UUID, ESTIMOTE_TELEMETRY_FRAME, \
ESTIMOTE_TELEMETRY_SUBFRAME_A, ESTIMOTE_TELEMETRY_SUBFRAME_B
def parse_packet(packet):
"""Parse a beacon advertisement packet."""
frame = parse_ltv_packet(packet)
if frame is None:
frame = parse_ibeacon_packet(packet)
return frame
def parse_ltv_packet(packet):
    """Parse a tag-length-value style beacon packet.

    Args:
        packet: raw advertisement bytes

    Returns:
        The parsed Eddystone/Estimote frame object, or None if the bytes
        are not a valid LTV frame or contain no recognized service data.
    """
    try:
        frame = LTVFrame.parse(packet)
        for ltv in frame:
            # only service-data elements carry the beacon payload
            if ltv['type'] == SERVICE_DATA_TYPE:
                data = ltv['value']

                if data["service_identifier"] == EDDYSTONE_UUID:
                    return parse_eddystone_service_data(data)

                elif data["service_identifier"] == ESTIMOTE_UUID:
                    return parse_estimote_service_data(data)

    except ConstructError:
        # malformed packet: treat as "not a beacon advertisement"
        return None

    return None
def parse_estimote_service_data(data):
"""Parse Estimote service data."""
if data['frame_type'] & 0xF == ESTIMOTE_TELEMETRY_FRAME:
protocol_version = (data['frame_type'] & 0xF0) >> 4
if data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_A:
return EstimoteTelemetryFrameA(data['frame'], protocol_version)
elif data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_B:
return EstimoteTelemetryFrameB(data['frame'], protocol_version)
return None
def parse_ibeacon_packet(packet):
"""Parse an ibeacon beacon advertisement packet."""
try:
pkt = IBeaconAdvertisingPacket.parse(packet)
return IBeaconAdvertisement(pkt)
except ConstructError:
return None
|
def parse_estimote_service_data(data):
    """Parse Estimote service data.

    The low nibble of ``frame_type`` identifies a telemetry frame; the
    high nibble carries the protocol version. Returns None for anything
    that is not a recognized telemetry subframe.
    """
    if data['frame_type'] & 0x0F != ESTIMOTE_TELEMETRY_FRAME:
        return None
    version = (data['frame_type'] & 0xF0) >> 4
    subframe = data['frame']
    if subframe['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_A:
        return EstimoteTelemetryFrameA(subframe, version)
    if subframe['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_B:
        return EstimoteTelemetryFrameB(subframe, version)
    return None
from construct import ConstructError
from .structs import LTVFrame, IBeaconAdvertisingPacket
from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, EddystoneEncryptedTLMFrame, \
EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \
EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
from .const import EDDYSTONE_TLM_UNENCRYPTED, EDDYSTONE_TLM_ENCRYPTED, SERVICE_DATA_TYPE, \
EDDYSTONE_UID_FRAME, EDDYSTONE_TLM_FRAME, EDDYSTONE_URL_FRAME, \
EDDYSTONE_EID_FRAME, EDDYSTONE_UUID, ESTIMOTE_UUID, ESTIMOTE_TELEMETRY_FRAME, \
ESTIMOTE_TELEMETRY_SUBFRAME_A, ESTIMOTE_TELEMETRY_SUBFRAME_B
def parse_packet(packet):
"""Parse a beacon advertisement packet."""
frame = parse_ltv_packet(packet)
if frame is None:
frame = parse_ibeacon_packet(packet)
return frame
def parse_ltv_packet(packet):
"""Parse a tag-length-value style beacon packet."""
try:
frame = LTVFrame.parse(packet)
for ltv in frame:
if ltv['type'] == SERVICE_DATA_TYPE:
data = ltv['value']
if data["service_identifier"] == EDDYSTONE_UUID:
return parse_eddystone_service_data(data)
elif data["service_identifier"] == ESTIMOTE_UUID:
return parse_estimote_service_data(data)
except ConstructError:
return None
return None
def parse_eddystone_service_data(data):
"""Parse Eddystone service data."""
if data['frame_type'] == EDDYSTONE_UID_FRAME:
return EddystoneUIDFrame(data['frame'])
elif data['frame_type'] == EDDYSTONE_TLM_FRAME:
if data['frame']['tlm_version'] == EDDYSTONE_TLM_ENCRYPTED:
return EddystoneEncryptedTLMFrame(data['frame']['data'])
elif data['frame']['tlm_version'] == EDDYSTONE_TLM_UNENCRYPTED:
return EddystoneTLMFrame(data['frame']['data'])
elif data['frame_type'] == EDDYSTONE_URL_FRAME:
return EddystoneURLFrame(data['frame'])
elif data['frame_type'] == EDDYSTONE_EID_FRAME:
return EddystoneEIDFrame(data['frame'])
else:
return None
def parse_ibeacon_packet(packet):
"""Parse an ibeacon beacon advertisement packet."""
try:
pkt = IBeaconAdvertisingPacket.parse(packet)
return IBeaconAdvertisement(pkt)
except ConstructError:
return None
|
def parse_motion_state(val):
    """Convert a motion state byte to a duration in seconds.

    The low 6 bits hold a count; the top 2 bits select the unit:
    0 = seconds, 1 = minutes, 2 = hours, 3 = days for counts below 32
    and weeks (count minus 32) otherwise.
    """
    duration = val & 0b00111111
    unit = (val & 0b11000000) >> 6
    if unit == 1:
        return duration * 60
    if unit == 2:
        return duration * 3600
    if unit == 3:
        if duration < 32:
            return duration * 86400
        return (duration - 32) * 604800
    # unit == 0: value is already in seconds
    return duration
"""Estimote telemetry subframe A."""
def __init__(self, data, protocol_version):
self._protocol_version = protocol_version
self._identifier = data_to_hexstring(data['identifier'])
sub = data['sub_frame']
# acceleration: convert to tuple and normalize
self._acceleration = tuple([v * 2 / 127.0 for v in sub['acceleration']])
# motion states
self._previous_motion_state = self.parse_motion_state(sub['previous_motion'])
self._current_motion_state = self.parse_motion_state(sub['current_motion'])
self._is_moving = (sub['combined_fields'][0] & 0b00000011) == 1
# gpio
states = []
for i in range(4):
states.append((sub['combined_fields'][0] & (1 << (4+i))) != 0)
self._gpio_states = tuple(states)
# error codes
if self.protocol_version == 2:
self._has_firmware_error = ((sub['combined_fields'][0] & 0b00000100) >> 2) == 1
self._has_clock_error = ((sub['combined_fields'][0] & 0b00001000) >> 3) == 1
elif self.protocol_version == 1:
self._has_firmware_error = (sub['combined_fields'][1] & 0b00000001) == 1
self._has_clock_error = ((sub['combined_fields'][1] & 0b00000010) >> 1) == 1
else:
self._has_firmware_error = None
self._has_clock_error = None
# pressure
if self.protocol_version == 2:
self._pressure = sub['combined_fields'][1] | \
sub['combined_fields'][2] << 8 | \
sub['combined_fields'][3] << 16 | \
sub['combined_fields'][4] << 24
if self._pressure == 0xffffffff:
self._pressure = None
else:
self._pressure /= 256.0
else:
self._pressure = None
@staticmethod
@property
def protocol_version(self):
"""Protocol version of the packet."""
return self._protocol_version
@property
def identifier(self):
"""First half of the identifier of the beacon (8 bytes)."""
return self._identifier
@property
def acceleration(self):
"""Tuple of acceleration values for (X, Y, Z) axis, in g."""
return self._acceleration
@property
def is_moving(self):
"""Whether the beacon is in motion at the moment (Bool)"""
return self._is_moving
@property
def current_motion_state(self):
"""Duration of current motion state in seconds.
E.g., if is_moving is True, this states how long the beacon is beeing moved already and
previous_motion_state will tell how long it has been still before."""
return self._current_motion_state
@property
def previous_motion_state(self):
"""Duration of previous motion state in seconds (see current_motion_state)."""
return self._previous_motion_state
@property
def gpio_states(self):
"""Tuple with state of the GPIO pins 0-3 (True is high, False is low)."""
return self._gpio_states
@property
def has_firmware_error(self):
"""If beacon has a firmware problem.
Only available if protocol version > 0, None otherwise."""
return self._has_firmware_error
@property
def has_clock_error(self):
"""If beacon has a clock problem. Only available if protocol version > 0, None otherwise."""
return self._has_clock_error
@property
def pressure(self):
"""Atmosperic pressure in Pascal. None if all bits are set.
Only available if protocol version is 2, None otherwise ."""
return self._pressure
@property
def properties(self):
"""Get beacon properties."""
return {'identifier': self.identifier, 'protocol_version': self.protocol_version}
def __str__(self):
return "EstimoteTelemetryFrameA<identifier: %s, protocol_version: %u>" \
% (self.identifier, self.protocol_version)
|
def _find_chordless_cycles(bond_graph, max_cycle_size):
    """Find all chordless cycles (i.e. rings) in the bond graph.

    Traverses the bond graph to determine all cycles (i.e. rings) each
    atom is contained within. Algorithm adapted from:
    https://stackoverflow.com/questions/4022662/find-all-chordless-cycles-in-an-undirected-graph/4028855#4028855

    Parameters
    ----------
    bond_graph : networkx.Graph
        Graph whose nodes are atoms and whose edges are bonds.
    max_cycle_size : int
        Upper bound on candidate ring length; the breadth-first
        expansion stops once candidate paths reach this length.

    Returns
    -------
    list of list
        One entry per node of ``bond_graph`` (in iteration order); each
        entry is a list of chordless cycles (node sequences) that the
        corresponding node participates in.
    """
    # One result slot per node, indexed in bond_graph.nodes order.
    cycles = [[] for _ in bond_graph.nodes]
    '''
    For all nodes we need to find the cycles that they are included within.
    '''
    for i, node in enumerate(bond_graph.nodes):
        neighbors = list(bond_graph.neighbors(node))
        pairs = list(itertools.combinations(neighbors, 2))
        '''
        Loop over all pairs of neighbors of the node. We will see if a ring
        exists that includes these branches.
        '''
        for pair in pairs:
            '''
            We need to store all node sequences that could be rings. We will
            update this as we traverse the graph.
            '''
            connected = False
            possible_rings = []
            last_node = pair[0]
            ring = [last_node, node, pair[1]]
            possible_rings.append(ring)
            # Special case: the two neighbors are directly bonded, which
            # closes a 3-membered ring immediately.
            if bond_graph.has_edge(last_node, pair[1]):
                cycles[i].append(ring)
                connected = True
            while not connected:
                '''
                Branch and create a new list of possible rings
                '''
                new_possible_rings = []
                for possible_ring in possible_rings:
                    next_neighbors = list(bond_graph.neighbors(possible_ring[-1]))
                    for next_neighbor in next_neighbors:
                        # Extend the path, but never step straight back.
                        if next_neighbor != possible_ring[-2]:
                            new_possible_rings.append(possible_ring + \
                                [next_neighbor])
                possible_rings = new_possible_rings
                for possible_ring in possible_rings:
                    # A candidate closes into a ring when its tip bonds back
                    # to the starting neighbor.
                    if bond_graph.has_edge(possible_ring[-1], last_node):
                        # Reject rings with a chord: an edge from the tip
                        # into the interior of the candidate path.
                        if any([bond_graph.has_edge(possible_ring[-1], internal_node)
                                for internal_node in possible_ring[1:-2]]):
                            pass
                        else:
                            cycles[i].append(possible_ring)
                            connected = True
                # Give up when no candidates remain or the size cap is hit.
                if not possible_rings or len(possible_rings[0]) == max_cycle_size:
                    break
    return cycles
Traverses the bond graph to determine all cycles (i.e. rings) each
atom is contained within. Algorithm has been adapted from:
https://stackoverflow.com/questions/4022662/find-all-chordless-cycles-in-an-undirected-graph/4028855#4028855 | train | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L241-L301 | null | from collections import OrderedDict, defaultdict
import itertools
import sys
import networkx as nx
from networkx.algorithms import isomorphism
from oset import oset as OrderedSet
import parmed.periodic_table as pt
from foyer.smarts import SMARTS
class SMARTSGraph(nx.Graph):
"""A graph representation of a SMARTS pattern.
Attributes
----------
smarts_string : str
parser : foyer.smarts.SMARTS
name : str
overrides : set
Other Parameters
----------
args
kwargs
"""
# Because the first atom in a SMARTS string is always the one we want to
# type, the graph's nodes needs to be ordered.
node_dict_factory = OrderedDict
def __init__(self, smarts_string, parser=None, name=None, overrides=None,
*args, **kwargs):
super(SMARTSGraph, self).__init__(*args, **kwargs)
self.smarts_string = smarts_string
self.name = name
self.overrides = overrides
if parser is None:
self.ast = SMARTS().parse(smarts_string)
else:
self.ast = parser.parse(smarts_string)
self._atom_indices = OrderedDict()
self._add_nodes()
self._add_edges(self.ast)
self._add_label_edges()
self._graph_matcher = None
def _add_nodes(self):
"""Add all atoms in the SMARTS string as nodes in the graph."""
for n, atom in enumerate(self.ast.select('atom')):
self.add_node(n, atom=atom)
self._atom_indices[id(atom)] = n
def _add_edges(self, ast_node, trunk=None):
""""Add all bonds in the SMARTS string as edges in the graph."""
atom_indices = self._atom_indices
for atom in ast_node.tail:
if atom.head == 'atom':
atom_idx = atom_indices[id(atom)]
if atom.is_first_kid and atom.parent().head == 'branch':
trunk_idx = atom_indices[id(trunk)]
self.add_edge(atom_idx, trunk_idx)
if not atom.is_last_kid:
if atom.next_kid.head == 'atom':
next_idx = atom_indices[id(atom.next_kid)]
self.add_edge(atom_idx, next_idx)
elif atom.next_kid.head == 'branch':
trunk = atom
else: # We traveled through the whole branch.
return
elif atom.head == 'branch':
self._add_edges(atom, trunk)
def _add_label_edges(self):
"""Add edges between all atoms with the same atom_label in rings."""
labels = self.ast.select('atom_label')
if not labels:
return
# We need each individual label and atoms with multiple ring labels
# would yield e.g. the string '12' so split those up.
label_digits = defaultdict(list)
for label in labels:
digits = list(label.tail[0])
for digit in digits:
label_digits[digit].append(label.parent())
for label, (atom1, atom2) in label_digits.items():
atom1_idx = self._atom_indices[id(atom1)]
atom2_idx = self._atom_indices[id(atom2)]
self.add_edge(atom1_idx, atom2_idx)
def _node_match(self, host, pattern):
atom_expr = pattern['atom'].tail[0]
atom = host['atom']
return self._atom_expr_matches(atom_expr, atom)
def _atom_expr_matches(self, atom_expr, atom):
if atom_expr.head == 'not_expression':
return not self._atom_expr_matches(atom_expr.tail[0], atom)
elif atom_expr.head in ('and_expression', 'weak_and_expression'):
return (self._atom_expr_matches(atom_expr.tail[0], atom) and
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'or_expression':
return (self._atom_expr_matches(atom_expr.tail[0], atom) or
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'atom_id':
return self._atom_id_matches(atom_expr.tail[0], atom)
elif atom_expr.head == 'atom_symbol':
return self._atom_id_matches(atom_expr, atom)
else:
raise TypeError('Expected atom_id, atom_symbol, and_expression, '
'or_expression, or not_expression. '
'Got {}'.format(atom_expr.head))
@staticmethod
def _atom_id_matches(atom_id, atom):
atomic_num = atom.element.atomic_number
if atom_id.head == 'atomic_num':
return atomic_num == int(atom_id.tail[0])
elif atom_id.head == 'atom_symbol':
if str(atom_id.tail[0]) == '*':
return True
elif str(atom_id.tail[0]).startswith('_'):
return atom.element.name == str(atom_id.tail[0])
else:
return atomic_num == pt.AtomicNum[str(atom_id.tail[0])]
elif atom_id.head == 'has_label':
label = atom_id.tail[0][1:] # Strip the % sign from the beginning.
return label in atom.whitelist
elif atom_id.head == 'neighbor_count':
return len(atom.bond_partners) == int(atom_id.tail[0])
elif atom_id.head == 'ring_size':
cycle_len = int(atom_id.tail[0])
for cycle in atom.cycles:
if len(cycle) == cycle_len:
return True
return False
elif atom_id.head == 'ring_count':
n_cycles = len(atom.cycles)
if n_cycles == int(atom_id.tail[0]):
return True
return False
elif atom_id.head == 'matches_string':
raise NotImplementedError('matches_string is not yet implemented')
def find_matches(self, topology):
"""Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edges cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`).
"""
# Note: Needs to be updated in sync with the grammar in `smarts.py`.
ring_tokens = ['ring_size', 'ring_count']
has_ring_rules = any(self.ast.select(token)
for token in ring_tokens)
_prepare_atoms(topology, compute_cycles=has_ring_rules)
top_graph = nx.Graph()
top_graph.add_nodes_from(((a.index, {'atom': a})
for a in topology.atoms()))
top_graph.add_edges_from(((b[0].index, b[1].index)
for b in topology.bonds()))
if self._graph_matcher is None:
atom = nx.get_node_attributes(self, name='atom')[0]
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
self._graph_matcher = SMARTSMatcher(top_graph, self,
node_match=self._node_match,
element=element)
matched_atoms = set()
for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
# The first node in the smarts graph always corresponds to the atom
# that we are trying to match.
atom_index = mapping[0]
# Don't yield duplicate matches found via matching the pattern in a
# different order.
if atom_index not in matched_atoms:
matched_atoms.add(atom_index)
yield atom_index
class SMARTSMatcher(isomorphism.vf2userfunc.GraphMatcher):
def __init__(self, G1, G2, node_match, element):
super(SMARTSMatcher, self).__init__(G1, G2, node_match)
self.element = element
if element not in [None, '*']:
self.valid_nodes = [n for n, atom in nx.get_node_attributes(G1, name='atom').items()
if atom.element.symbol == element]
else:
self.valid_nodes = G1.nodes()
def candidate_pairs_iter(self):
"""Iterator over candidate pairs of nodes in G1 and G2."""
# All computations are done using the current state!
G2_nodes = self.G2_nodes
# First we compute the inout-terminal sets.
T1_inout = set(self.inout_1.keys()) - set(self.core_1.keys())
T2_inout = set(self.inout_2.keys()) - set(self.core_2.keys())
# If T1_inout and T2_inout are both nonempty.
# P(s) = T1_inout x {min T2_inout}
if T1_inout and T2_inout:
for node in T1_inout:
yield node, min(T2_inout)
else:
# First we determine the candidate node for G2
other_node = min(G2_nodes - set(self.core_2))
host_nodes = self.valid_nodes if other_node == 0 else self.G1.nodes()
for node in host_nodes:
if node not in self.core_1:
yield node, other_node
# For all other cases, we don't have any candidate pairs.
def _prepare_atoms(topology, compute_cycles=False):
"""Compute cycles and add white-/blacklists to atoms."""
atom1 = next(topology.atoms())
has_whitelists = hasattr(atom1, 'whitelist')
has_cycles = hasattr(atom1, 'cycles')
compute_cycles = compute_cycles and not has_cycles
if compute_cycles or not has_whitelists:
for atom in topology.atoms():
if compute_cycles:
atom.cycles = set()
if not has_whitelists:
atom.whitelist = OrderedSet()
atom.blacklist = OrderedSet()
if compute_cycles:
bond_graph = nx.Graph()
bond_graph.add_nodes_from(topology.atoms())
bond_graph.add_edges_from(topology.bonds())
all_cycles = _find_chordless_cycles(bond_graph, max_cycle_size=8)
for atom, cycles in zip(bond_graph.nodes, all_cycles):
for cycle in cycles:
atom.cycles.add(tuple(cycle))
|
def _prepare_atoms(topology, compute_cycles=False):
    """Compute cycles and add white-/blacklists to atoms.

    Initializes per-atom ``whitelist``/``blacklist`` sets on first use
    and, when requested, computes the rings each atom belongs to. Both
    steps are skipped if a previous call already attached the attributes.
    """
    first_atom = next(topology.atoms())
    needs_lists = not hasattr(first_atom, 'whitelist')
    # Only recompute rings when they are requested and not cached yet.
    compute_cycles = compute_cycles and not hasattr(first_atom, 'cycles')
    if compute_cycles or needs_lists:
        for atom in topology.atoms():
            if compute_cycles:
                atom.cycles = set()
            if needs_lists:
                atom.whitelist = OrderedSet()
                atom.blacklist = OrderedSet()
    if compute_cycles:
        bond_graph = nx.Graph()
        bond_graph.add_nodes_from(topology.atoms())
        bond_graph.add_edges_from(topology.bonds())
        ring_lists = _find_chordless_cycles(bond_graph, max_cycle_size=8)
        for atom, rings in zip(bond_graph.nodes, ring_lists):
            for ring in rings:
                atom.cycles.add(tuple(ring))
import itertools
import sys
import networkx as nx
from networkx.algorithms import isomorphism
from oset import oset as OrderedSet
import parmed.periodic_table as pt
from foyer.smarts import SMARTS
class SMARTSGraph(nx.Graph):
"""A graph representation of a SMARTS pattern.
Attributes
----------
smarts_string : str
parser : foyer.smarts.SMARTS
name : str
overrides : set
Other Parameters
----------
args
kwargs
"""
# Because the first atom in a SMARTS string is always the one we want to
# type, the graph's nodes needs to be ordered.
node_dict_factory = OrderedDict
def __init__(self, smarts_string, parser=None, name=None, overrides=None,
*args, **kwargs):
super(SMARTSGraph, self).__init__(*args, **kwargs)
self.smarts_string = smarts_string
self.name = name
self.overrides = overrides
if parser is None:
self.ast = SMARTS().parse(smarts_string)
else:
self.ast = parser.parse(smarts_string)
self._atom_indices = OrderedDict()
self._add_nodes()
self._add_edges(self.ast)
self._add_label_edges()
self._graph_matcher = None
def _add_nodes(self):
"""Add all atoms in the SMARTS string as nodes in the graph."""
for n, atom in enumerate(self.ast.select('atom')):
self.add_node(n, atom=atom)
self._atom_indices[id(atom)] = n
def _add_edges(self, ast_node, trunk=None):
""""Add all bonds in the SMARTS string as edges in the graph."""
atom_indices = self._atom_indices
for atom in ast_node.tail:
if atom.head == 'atom':
atom_idx = atom_indices[id(atom)]
if atom.is_first_kid and atom.parent().head == 'branch':
trunk_idx = atom_indices[id(trunk)]
self.add_edge(atom_idx, trunk_idx)
if not atom.is_last_kid:
if atom.next_kid.head == 'atom':
next_idx = atom_indices[id(atom.next_kid)]
self.add_edge(atom_idx, next_idx)
elif atom.next_kid.head == 'branch':
trunk = atom
else: # We traveled through the whole branch.
return
elif atom.head == 'branch':
self._add_edges(atom, trunk)
def _add_label_edges(self):
"""Add edges between all atoms with the same atom_label in rings."""
labels = self.ast.select('atom_label')
if not labels:
return
# We need each individual label and atoms with multiple ring labels
# would yield e.g. the string '12' so split those up.
label_digits = defaultdict(list)
for label in labels:
digits = list(label.tail[0])
for digit in digits:
label_digits[digit].append(label.parent())
for label, (atom1, atom2) in label_digits.items():
atom1_idx = self._atom_indices[id(atom1)]
atom2_idx = self._atom_indices[id(atom2)]
self.add_edge(atom1_idx, atom2_idx)
def _node_match(self, host, pattern):
atom_expr = pattern['atom'].tail[0]
atom = host['atom']
return self._atom_expr_matches(atom_expr, atom)
def _atom_expr_matches(self, atom_expr, atom):
if atom_expr.head == 'not_expression':
return not self._atom_expr_matches(atom_expr.tail[0], atom)
elif atom_expr.head in ('and_expression', 'weak_and_expression'):
return (self._atom_expr_matches(atom_expr.tail[0], atom) and
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'or_expression':
return (self._atom_expr_matches(atom_expr.tail[0], atom) or
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'atom_id':
return self._atom_id_matches(atom_expr.tail[0], atom)
elif atom_expr.head == 'atom_symbol':
return self._atom_id_matches(atom_expr, atom)
else:
raise TypeError('Expected atom_id, atom_symbol, and_expression, '
'or_expression, or not_expression. '
'Got {}'.format(atom_expr.head))
@staticmethod
def _atom_id_matches(atom_id, atom):
atomic_num = atom.element.atomic_number
if atom_id.head == 'atomic_num':
return atomic_num == int(atom_id.tail[0])
elif atom_id.head == 'atom_symbol':
if str(atom_id.tail[0]) == '*':
return True
elif str(atom_id.tail[0]).startswith('_'):
return atom.element.name == str(atom_id.tail[0])
else:
return atomic_num == pt.AtomicNum[str(atom_id.tail[0])]
elif atom_id.head == 'has_label':
label = atom_id.tail[0][1:] # Strip the % sign from the beginning.
return label in atom.whitelist
elif atom_id.head == 'neighbor_count':
return len(atom.bond_partners) == int(atom_id.tail[0])
elif atom_id.head == 'ring_size':
cycle_len = int(atom_id.tail[0])
for cycle in atom.cycles:
if len(cycle) == cycle_len:
return True
return False
elif atom_id.head == 'ring_count':
n_cycles = len(atom.cycles)
if n_cycles == int(atom_id.tail[0]):
return True
return False
elif atom_id.head == 'matches_string':
raise NotImplementedError('matches_string is not yet implemented')
def find_matches(self, topology):
"""Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edges cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`).
"""
# Note: Needs to be updated in sync with the grammar in `smarts.py`.
ring_tokens = ['ring_size', 'ring_count']
has_ring_rules = any(self.ast.select(token)
for token in ring_tokens)
_prepare_atoms(topology, compute_cycles=has_ring_rules)
top_graph = nx.Graph()
top_graph.add_nodes_from(((a.index, {'atom': a})
for a in topology.atoms()))
top_graph.add_edges_from(((b[0].index, b[1].index)
for b in topology.bonds()))
if self._graph_matcher is None:
atom = nx.get_node_attributes(self, name='atom')[0]
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
self._graph_matcher = SMARTSMatcher(top_graph, self,
node_match=self._node_match,
element=element)
matched_atoms = set()
for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
# The first node in the smarts graph always corresponds to the atom
# that we are trying to match.
atom_index = mapping[0]
# Don't yield duplicate matches found via matching the pattern in a
# different order.
if atom_index not in matched_atoms:
matched_atoms.add(atom_index)
yield atom_index
class SMARTSMatcher(isomorphism.vf2userfunc.GraphMatcher):
def __init__(self, G1, G2, node_match, element):
super(SMARTSMatcher, self).__init__(G1, G2, node_match)
self.element = element
if element not in [None, '*']:
self.valid_nodes = [n for n, atom in nx.get_node_attributes(G1, name='atom').items()
if atom.element.symbol == element]
else:
self.valid_nodes = G1.nodes()
def candidate_pairs_iter(self):
"""Iterator over candidate pairs of nodes in G1 and G2."""
# All computations are done using the current state!
G2_nodes = self.G2_nodes
# First we compute the inout-terminal sets.
T1_inout = set(self.inout_1.keys()) - set(self.core_1.keys())
T2_inout = set(self.inout_2.keys()) - set(self.core_2.keys())
# If T1_inout and T2_inout are both nonempty.
# P(s) = T1_inout x {min T2_inout}
if T1_inout and T2_inout:
for node in T1_inout:
yield node, min(T2_inout)
else:
# First we determine the candidate node for G2
other_node = min(G2_nodes - set(self.core_2))
host_nodes = self.valid_nodes if other_node == 0 else self.G1.nodes()
for node in host_nodes:
if node not in self.core_1:
yield node, other_node
# For all other cases, we don't have any candidate pairs.
def _find_chordless_cycles(bond_graph, max_cycle_size):
"""Find all chordless cycles (i.e. rings) in the bond graph
Traverses the bond graph to determine all cycles (i.e. rings) each
atom is contained within. Algorithm has been adapted from:
https://stackoverflow.com/questions/4022662/find-all-chordless-cycles-in-an-undirected-graph/4028855#4028855
"""
cycles = [[] for _ in bond_graph.nodes]
'''
For all nodes we need to find the cycles that they are included within.
'''
for i, node in enumerate(bond_graph.nodes):
neighbors = list(bond_graph.neighbors(node))
pairs = list(itertools.combinations(neighbors, 2))
'''
Loop over all pairs of neighbors of the node. We will see if a ring
exists that includes these branches.
'''
for pair in pairs:
'''
We need to store all node sequences that could be rings. We will
update this as we traverse the graph.
'''
connected = False
possible_rings = []
last_node = pair[0]
ring = [last_node, node, pair[1]]
possible_rings.append(ring)
if bond_graph.has_edge(last_node, pair[1]):
cycles[i].append(ring)
connected = True
while not connected:
'''
Branch and create a new list of possible rings
'''
new_possible_rings = []
for possible_ring in possible_rings:
next_neighbors = list(bond_graph.neighbors(possible_ring[-1]))
for next_neighbor in next_neighbors:
if next_neighbor != possible_ring[-2]:
new_possible_rings.append(possible_ring + \
[next_neighbor])
possible_rings = new_possible_rings
for possible_ring in possible_rings:
if bond_graph.has_edge(possible_ring[-1], last_node):
if any([bond_graph.has_edge(possible_ring[-1], internal_node)
for internal_node in possible_ring[1:-2]]):
pass
else:
cycles[i].append(possible_ring)
connected = True
if not possible_rings or len(possible_rings[0]) == max_cycle_size:
break
return cycles
|
def _add_nodes(self):
    """Add all atoms in the SMARTS string as nodes in the graph."""
    for index, atom in enumerate(self.ast.select('atom')):
        self.add_node(index, atom=atom)
        # Remember each AST atom's node index for later edge creation.
        self._atom_indices[id(atom)] = index
"""A graph representation of a SMARTS pattern.
Attributes
----------
smarts_string : str
parser : foyer.smarts.SMARTS
name : str
overrides : set
Other Parameters
----------
args
kwargs
"""
# Because the first atom in a SMARTS string is always the one we want to
# type, the graph's nodes needs to be ordered.
node_dict_factory = OrderedDict
def __init__(self, smarts_string, parser=None, name=None, overrides=None,
*args, **kwargs):
super(SMARTSGraph, self).__init__(*args, **kwargs)
self.smarts_string = smarts_string
self.name = name
self.overrides = overrides
if parser is None:
self.ast = SMARTS().parse(smarts_string)
else:
self.ast = parser.parse(smarts_string)
self._atom_indices = OrderedDict()
self._add_nodes()
self._add_edges(self.ast)
self._add_label_edges()
self._graph_matcher = None
def _add_edges(self, ast_node, trunk=None):
""""Add all bonds in the SMARTS string as edges in the graph."""
atom_indices = self._atom_indices
for atom in ast_node.tail:
if atom.head == 'atom':
atom_idx = atom_indices[id(atom)]
if atom.is_first_kid and atom.parent().head == 'branch':
trunk_idx = atom_indices[id(trunk)]
self.add_edge(atom_idx, trunk_idx)
if not atom.is_last_kid:
if atom.next_kid.head == 'atom':
next_idx = atom_indices[id(atom.next_kid)]
self.add_edge(atom_idx, next_idx)
elif atom.next_kid.head == 'branch':
trunk = atom
else: # We traveled through the whole branch.
return
elif atom.head == 'branch':
self._add_edges(atom, trunk)
def _add_label_edges(self):
"""Add edges between all atoms with the same atom_label in rings."""
labels = self.ast.select('atom_label')
if not labels:
return
# We need each individual label and atoms with multiple ring labels
# would yield e.g. the string '12' so split those up.
label_digits = defaultdict(list)
for label in labels:
digits = list(label.tail[0])
for digit in digits:
label_digits[digit].append(label.parent())
for label, (atom1, atom2) in label_digits.items():
atom1_idx = self._atom_indices[id(atom1)]
atom2_idx = self._atom_indices[id(atom2)]
self.add_edge(atom1_idx, atom2_idx)
def _node_match(self, host, pattern):
atom_expr = pattern['atom'].tail[0]
atom = host['atom']
return self._atom_expr_matches(atom_expr, atom)
def _atom_expr_matches(self, atom_expr, atom):
if atom_expr.head == 'not_expression':
return not self._atom_expr_matches(atom_expr.tail[0], atom)
elif atom_expr.head in ('and_expression', 'weak_and_expression'):
return (self._atom_expr_matches(atom_expr.tail[0], atom) and
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'or_expression':
return (self._atom_expr_matches(atom_expr.tail[0], atom) or
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'atom_id':
return self._atom_id_matches(atom_expr.tail[0], atom)
elif atom_expr.head == 'atom_symbol':
return self._atom_id_matches(atom_expr, atom)
else:
raise TypeError('Expected atom_id, atom_symbol, and_expression, '
'or_expression, or not_expression. '
'Got {}'.format(atom_expr.head))
@staticmethod
def _atom_id_matches(atom_id, atom):
atomic_num = atom.element.atomic_number
if atom_id.head == 'atomic_num':
return atomic_num == int(atom_id.tail[0])
elif atom_id.head == 'atom_symbol':
if str(atom_id.tail[0]) == '*':
return True
elif str(atom_id.tail[0]).startswith('_'):
return atom.element.name == str(atom_id.tail[0])
else:
return atomic_num == pt.AtomicNum[str(atom_id.tail[0])]
elif atom_id.head == 'has_label':
label = atom_id.tail[0][1:] # Strip the % sign from the beginning.
return label in atom.whitelist
elif atom_id.head == 'neighbor_count':
return len(atom.bond_partners) == int(atom_id.tail[0])
elif atom_id.head == 'ring_size':
cycle_len = int(atom_id.tail[0])
for cycle in atom.cycles:
if len(cycle) == cycle_len:
return True
return False
elif atom_id.head == 'ring_count':
n_cycles = len(atom.cycles)
if n_cycles == int(atom_id.tail[0]):
return True
return False
elif atom_id.head == 'matches_string':
raise NotImplementedError('matches_string is not yet implemented')
def find_matches(self, topology):
"""Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edges cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`).
"""
# Note: Needs to be updated in sync with the grammar in `smarts.py`.
ring_tokens = ['ring_size', 'ring_count']
has_ring_rules = any(self.ast.select(token)
for token in ring_tokens)
_prepare_atoms(topology, compute_cycles=has_ring_rules)
top_graph = nx.Graph()
top_graph.add_nodes_from(((a.index, {'atom': a})
for a in topology.atoms()))
top_graph.add_edges_from(((b[0].index, b[1].index)
for b in topology.bonds()))
if self._graph_matcher is None:
atom = nx.get_node_attributes(self, name='atom')[0]
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
self._graph_matcher = SMARTSMatcher(top_graph, self,
node_match=self._node_match,
element=element)
matched_atoms = set()
for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
# The first node in the smarts graph always corresponds to the atom
# that we are trying to match.
atom_index = mapping[0]
# Don't yield duplicate matches found via matching the pattern in a
# different order.
if atom_index not in matched_atoms:
matched_atoms.add(atom_index)
yield atom_index
|
mosdef-hub/foyer | foyer/smarts_graph.py | SMARTSGraph._add_edges | python | def _add_edges(self, ast_node, trunk=None):
"
atom_indices = self._atom_indices
for atom in ast_node.tail:
if atom.head == 'atom':
atom_idx = atom_indices[id(atom)]
if atom.is_first_kid and atom.parent().head == 'branch':
trunk_idx = atom_indices[id(trunk)]
self.add_edge(atom_idx, trunk_idx)
if not atom.is_last_kid:
if atom.next_kid.head == 'atom':
next_idx = atom_indices[id(atom.next_kid)]
self.add_edge(atom_idx, next_idx)
elif atom.next_kid.head == 'branch':
trunk = atom
else: # We traveled through the whole branch.
return
elif atom.head == 'branch':
self._add_edges(atom, trunk) | Add all bonds in the SMARTS string as edges in the graph. | train | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L57-L75 | null | class SMARTSGraph(nx.Graph):
"""A graph representation of a SMARTS pattern.
Attributes
----------
smarts_string : str
parser : foyer.smarts.SMARTS
name : str
overrides : set
Other Parameters
----------
args
kwargs
"""
# Because the first atom in a SMARTS string is always the one we want to
# type, the graph's nodes needs to be ordered.
node_dict_factory = OrderedDict
def __init__(self, smarts_string, parser=None, name=None, overrides=None,
*args, **kwargs):
super(SMARTSGraph, self).__init__(*args, **kwargs)
self.smarts_string = smarts_string
self.name = name
self.overrides = overrides
if parser is None:
self.ast = SMARTS().parse(smarts_string)
else:
self.ast = parser.parse(smarts_string)
self._atom_indices = OrderedDict()
self._add_nodes()
self._add_edges(self.ast)
self._add_label_edges()
self._graph_matcher = None
def _add_nodes(self):
"""Add all atoms in the SMARTS string as nodes in the graph."""
for n, atom in enumerate(self.ast.select('atom')):
self.add_node(n, atom=atom)
self._atom_indices[id(atom)] = n
def _add_label_edges(self):
"""Add edges between all atoms with the same atom_label in rings."""
labels = self.ast.select('atom_label')
if not labels:
return
# We need each individual label and atoms with multiple ring labels
# would yield e.g. the string '12' so split those up.
label_digits = defaultdict(list)
for label in labels:
digits = list(label.tail[0])
for digit in digits:
label_digits[digit].append(label.parent())
for label, (atom1, atom2) in label_digits.items():
atom1_idx = self._atom_indices[id(atom1)]
atom2_idx = self._atom_indices[id(atom2)]
self.add_edge(atom1_idx, atom2_idx)
def _node_match(self, host, pattern):
atom_expr = pattern['atom'].tail[0]
atom = host['atom']
return self._atom_expr_matches(atom_expr, atom)
def _atom_expr_matches(self, atom_expr, atom):
if atom_expr.head == 'not_expression':
return not self._atom_expr_matches(atom_expr.tail[0], atom)
elif atom_expr.head in ('and_expression', 'weak_and_expression'):
return (self._atom_expr_matches(atom_expr.tail[0], atom) and
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'or_expression':
return (self._atom_expr_matches(atom_expr.tail[0], atom) or
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'atom_id':
return self._atom_id_matches(atom_expr.tail[0], atom)
elif atom_expr.head == 'atom_symbol':
return self._atom_id_matches(atom_expr, atom)
else:
raise TypeError('Expected atom_id, atom_symbol, and_expression, '
'or_expression, or not_expression. '
'Got {}'.format(atom_expr.head))
@staticmethod
def _atom_id_matches(atom_id, atom):
atomic_num = atom.element.atomic_number
if atom_id.head == 'atomic_num':
return atomic_num == int(atom_id.tail[0])
elif atom_id.head == 'atom_symbol':
if str(atom_id.tail[0]) == '*':
return True
elif str(atom_id.tail[0]).startswith('_'):
return atom.element.name == str(atom_id.tail[0])
else:
return atomic_num == pt.AtomicNum[str(atom_id.tail[0])]
elif atom_id.head == 'has_label':
label = atom_id.tail[0][1:] # Strip the % sign from the beginning.
return label in atom.whitelist
elif atom_id.head == 'neighbor_count':
return len(atom.bond_partners) == int(atom_id.tail[0])
elif atom_id.head == 'ring_size':
cycle_len = int(atom_id.tail[0])
for cycle in atom.cycles:
if len(cycle) == cycle_len:
return True
return False
elif atom_id.head == 'ring_count':
n_cycles = len(atom.cycles)
if n_cycles == int(atom_id.tail[0]):
return True
return False
elif atom_id.head == 'matches_string':
raise NotImplementedError('matches_string is not yet implemented')
def find_matches(self, topology):
"""Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edges cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`).
"""
# Note: Needs to be updated in sync with the grammar in `smarts.py`.
ring_tokens = ['ring_size', 'ring_count']
has_ring_rules = any(self.ast.select(token)
for token in ring_tokens)
_prepare_atoms(topology, compute_cycles=has_ring_rules)
top_graph = nx.Graph()
top_graph.add_nodes_from(((a.index, {'atom': a})
for a in topology.atoms()))
top_graph.add_edges_from(((b[0].index, b[1].index)
for b in topology.bonds()))
if self._graph_matcher is None:
atom = nx.get_node_attributes(self, name='atom')[0]
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
self._graph_matcher = SMARTSMatcher(top_graph, self,
node_match=self._node_match,
element=element)
matched_atoms = set()
for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
# The first node in the smarts graph always corresponds to the atom
# that we are trying to match.
atom_index = mapping[0]
# Don't yield duplicate matches found via matching the pattern in a
# different order.
if atom_index not in matched_atoms:
matched_atoms.add(atom_index)
yield atom_index
|
mosdef-hub/foyer | foyer/smarts_graph.py | SMARTSGraph._add_label_edges | python | def _add_label_edges(self):
labels = self.ast.select('atom_label')
if not labels:
return
# We need each individual label and atoms with multiple ring labels
# would yield e.g. the string '12' so split those up.
label_digits = defaultdict(list)
for label in labels:
digits = list(label.tail[0])
for digit in digits:
label_digits[digit].append(label.parent())
for label, (atom1, atom2) in label_digits.items():
atom1_idx = self._atom_indices[id(atom1)]
atom2_idx = self._atom_indices[id(atom2)]
self.add_edge(atom1_idx, atom2_idx) | Add edges between all atoms with the same atom_label in rings. | train | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L77-L94 | null | class SMARTSGraph(nx.Graph):
"""A graph representation of a SMARTS pattern.
Attributes
----------
smarts_string : str
parser : foyer.smarts.SMARTS
name : str
overrides : set
Other Parameters
----------
args
kwargs
"""
# Because the first atom in a SMARTS string is always the one we want to
# type, the graph's nodes needs to be ordered.
node_dict_factory = OrderedDict
def __init__(self, smarts_string, parser=None, name=None, overrides=None,
*args, **kwargs):
super(SMARTSGraph, self).__init__(*args, **kwargs)
self.smarts_string = smarts_string
self.name = name
self.overrides = overrides
if parser is None:
self.ast = SMARTS().parse(smarts_string)
else:
self.ast = parser.parse(smarts_string)
self._atom_indices = OrderedDict()
self._add_nodes()
self._add_edges(self.ast)
self._add_label_edges()
self._graph_matcher = None
def _add_nodes(self):
"""Add all atoms in the SMARTS string as nodes in the graph."""
for n, atom in enumerate(self.ast.select('atom')):
self.add_node(n, atom=atom)
self._atom_indices[id(atom)] = n
def _add_edges(self, ast_node, trunk=None):
""""Add all bonds in the SMARTS string as edges in the graph."""
atom_indices = self._atom_indices
for atom in ast_node.tail:
if atom.head == 'atom':
atom_idx = atom_indices[id(atom)]
if atom.is_first_kid and atom.parent().head == 'branch':
trunk_idx = atom_indices[id(trunk)]
self.add_edge(atom_idx, trunk_idx)
if not atom.is_last_kid:
if atom.next_kid.head == 'atom':
next_idx = atom_indices[id(atom.next_kid)]
self.add_edge(atom_idx, next_idx)
elif atom.next_kid.head == 'branch':
trunk = atom
else: # We traveled through the whole branch.
return
elif atom.head == 'branch':
self._add_edges(atom, trunk)
def _node_match(self, host, pattern):
atom_expr = pattern['atom'].tail[0]
atom = host['atom']
return self._atom_expr_matches(atom_expr, atom)
def _atom_expr_matches(self, atom_expr, atom):
if atom_expr.head == 'not_expression':
return not self._atom_expr_matches(atom_expr.tail[0], atom)
elif atom_expr.head in ('and_expression', 'weak_and_expression'):
return (self._atom_expr_matches(atom_expr.tail[0], atom) and
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'or_expression':
return (self._atom_expr_matches(atom_expr.tail[0], atom) or
self._atom_expr_matches(atom_expr.tail[1], atom))
elif atom_expr.head == 'atom_id':
return self._atom_id_matches(atom_expr.tail[0], atom)
elif atom_expr.head == 'atom_symbol':
return self._atom_id_matches(atom_expr, atom)
else:
raise TypeError('Expected atom_id, atom_symbol, and_expression, '
'or_expression, or not_expression. '
'Got {}'.format(atom_expr.head))
@staticmethod
def _atom_id_matches(atom_id, atom):
atomic_num = atom.element.atomic_number
if atom_id.head == 'atomic_num':
return atomic_num == int(atom_id.tail[0])
elif atom_id.head == 'atom_symbol':
if str(atom_id.tail[0]) == '*':
return True
elif str(atom_id.tail[0]).startswith('_'):
return atom.element.name == str(atom_id.tail[0])
else:
return atomic_num == pt.AtomicNum[str(atom_id.tail[0])]
elif atom_id.head == 'has_label':
label = atom_id.tail[0][1:] # Strip the % sign from the beginning.
return label in atom.whitelist
elif atom_id.head == 'neighbor_count':
return len(atom.bond_partners) == int(atom_id.tail[0])
elif atom_id.head == 'ring_size':
cycle_len = int(atom_id.tail[0])
for cycle in atom.cycles:
if len(cycle) == cycle_len:
return True
return False
elif atom_id.head == 'ring_count':
n_cycles = len(atom.cycles)
if n_cycles == int(atom_id.tail[0]):
return True
return False
elif atom_id.head == 'matches_string':
raise NotImplementedError('matches_string is not yet implemented')
def find_matches(self, topology):
"""Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edges cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`).
"""
# Note: Needs to be updated in sync with the grammar in `smarts.py`.
ring_tokens = ['ring_size', 'ring_count']
has_ring_rules = any(self.ast.select(token)
for token in ring_tokens)
_prepare_atoms(topology, compute_cycles=has_ring_rules)
top_graph = nx.Graph()
top_graph.add_nodes_from(((a.index, {'atom': a})
for a in topology.atoms()))
top_graph.add_edges_from(((b[0].index, b[1].index)
for b in topology.bonds()))
if self._graph_matcher is None:
atom = nx.get_node_attributes(self, name='atom')[0]
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
self._graph_matcher = SMARTSMatcher(top_graph, self,
node_match=self._node_match,
element=element)
matched_atoms = set()
for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
# The first node in the smarts graph always corresponds to the atom
# that we are trying to match.
atom_index = mapping[0]
# Don't yield duplicate matches found via matching the pattern in a
# different order.
if atom_index not in matched_atoms:
matched_atoms.add(atom_index)
yield atom_index
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.