repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
google/budou | budou/chunk.py | Chunk.serialize | python | def serialize(self):
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
} | Returns serialized chunk data in dictionary. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L79-L87 | [
"def has_cjk(self):\n \"\"\"Checks if the word of the chunk contains CJK characters.\n\n This is using unicode codepoint ranges from\n https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149\n\n Returns:\n bool: True if the chunk has any CJK character.\n \"\"\"\n cjk_codepoint_ranges = [\n ... | class Chunk:
"""A unit for word segmentation.
Attributes:
word (str): Surface word of the chunk.
pos (:obj:`str`, optional): Part of speech.
label (:obj:`str`, optional): Label information.
dependency (:obj:`bool`, optional): Dependency to neighbor words.
:code:`None` for no dependency, :code:`True` for dependency to the
following word, and :code:`False` for the dependency to the previous
word.
Args:
word (str): Surface word of the chunk.
pos (:obj:`str`, optional): Part of speech.
label (:obj:`str`, optional): Label information.
dependency (:obj:`bool`, optional): Dependency to neighbor words.
:code:`None` for no dependency, :code:`True` for dependency to the
following word, and :code:`False` for the dependency to the previous
word.
"""
_SPACE_POS = 'SPACE'
_BREAK_POS = 'BREAK'
def __init__(self, word, pos=None, label=None, dependency=None):
self.word = word
self.pos = pos
self.label = label
self.dependency = dependency
def __repr__(self):
return 'Chunk(%s, %s, %s, %s)' % (
repr(self.word), self.pos, self.label, self.dependency)
@classmethod
def space(cls):
"""Creates space Chunk.
Returns:
A chunk (:obj:`budou.chunk.Chunk`)
"""
chunk = cls(u' ', cls._SPACE_POS)
return chunk
@classmethod
def breakline(cls):
"""Creates breakline Chunk.
Returns:
A chunk (:obj:`budou.chunk.Chunk`)
"""
chunk = cls(u'\n', cls._BREAK_POS)
return chunk
def is_space(self):
"""Whether the chunk is a space.
Returns:
bool: True if it is a space.
"""
return self.pos == self._SPACE_POS
def is_punct(self):
"""Whether the chunk is a punctuation mark.
See also https://en.wikipedia.org/wiki/Unicode_character_property
Returns:
bool: True if it is a punctuation mark.
"""
return len(self.word) == 1 and unicodedata.category(self.word)[0] == 'P'
def is_open_punct(self):
"""Whether the chunk is an open punctuation mark.
Ps: Punctuation, open (e.g. opening bracket characters)
Pi: Punctuation, initial quote (e.g. opening quotation mark)
See also https://en.wikipedia.org/wiki/Unicode_character_property
Returns:
bool: True if it is an open punctuation mark.
"""
return self.is_punct() and unicodedata.category(self.word) in {'Ps', 'Pi'}
def has_cjk(self):
"""Checks if the word of the chunk contains CJK characters.
This is using unicode codepoint ranges from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
Returns:
bool: True if the chunk has any CJK character.
"""
cjk_codepoint_ranges = [
(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]
for char in self.word:
if any([start <= ord(char) <= end
for start, end in cjk_codepoint_ranges]):
return True
return False
|
google/budou | budou/chunk.py | Chunk.has_cjk | python | def has_cjk(self):
cjk_codepoint_ranges = [
(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]
for char in self.word:
if any([start <= ord(char) <= end
for start, end in cjk_codepoint_ranges]):
return True
return False | Checks if the word of the chunk contains CJK characters.
This is using unicode codepoint ranges from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
Returns:
bool: True if the chunk has any CJK character. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L119-L135 | null | class Chunk:
"""A unit for word segmentation.
Attributes:
word (str): Surface word of the chunk.
pos (:obj:`str`, optional): Part of speech.
label (:obj:`str`, optional): Label information.
dependency (:obj:`bool`, optional): Dependency to neighbor words.
:code:`None` for no dependency, :code:`True` for dependency to the
following word, and :code:`False` for the dependency to the previous
word.
Args:
word (str): Surface word of the chunk.
pos (:obj:`str`, optional): Part of speech.
label (:obj:`str`, optional): Label information.
dependency (:obj:`bool`, optional): Dependency to neighbor words.
:code:`None` for no dependency, :code:`True` for dependency to the
following word, and :code:`False` for the dependency to the previous
word.
"""
_SPACE_POS = 'SPACE'
_BREAK_POS = 'BREAK'
def __init__(self, word, pos=None, label=None, dependency=None):
self.word = word
self.pos = pos
self.label = label
self.dependency = dependency
def __repr__(self):
return 'Chunk(%s, %s, %s, %s)' % (
repr(self.word), self.pos, self.label, self.dependency)
@classmethod
def space(cls):
"""Creates space Chunk.
Returns:
A chunk (:obj:`budou.chunk.Chunk`)
"""
chunk = cls(u' ', cls._SPACE_POS)
return chunk
@classmethod
def breakline(cls):
"""Creates breakline Chunk.
Returns:
A chunk (:obj:`budou.chunk.Chunk`)
"""
chunk = cls(u'\n', cls._BREAK_POS)
return chunk
def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
}
def is_space(self):
"""Whether the chunk is a space.
Returns:
bool: True if it is a space.
"""
return self.pos == self._SPACE_POS
def is_punct(self):
"""Whether the chunk is a punctuation mark.
See also https://en.wikipedia.org/wiki/Unicode_character_property
Returns:
bool: True if it is a punctuation mark.
"""
return len(self.word) == 1 and unicodedata.category(self.word)[0] == 'P'
def is_open_punct(self):
"""Whether the chunk is an open punctuation mark.
Ps: Punctuation, open (e.g. opening bracket characters)
Pi: Punctuation, initial quote (e.g. opening quotation mark)
See also https://en.wikipedia.org/wiki/Unicode_character_property
Returns:
bool: True if it is an open punctuation mark.
"""
return self.is_punct() and unicodedata.category(self.word) in {'Ps', 'Pi'}
|
google/budou | budou/chunk.py | ChunkList.get_overlaps | python | def get_overlaps(self, offset, length):
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result | Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`) | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L189-L208 | null | class ChunkList(collections.MutableSequence):
"""List of :obj:`budou.chunk.Chunk` with some helpers.
This list accepts only instances of :obj:`budou.chunk.Chunk`.
Example:
.. code-block:: python
from budou.chunk import Chunk, ChunkList
chunks = ChunkList(Chunk('abc'), Chunk('def'))
chunks.append(Chunk('ghi')) # OK
chunks.append('jkl') # NG
Args:
args (list of :obj:`budou.chunk.Chunk`): Initial values included in the
list.
"""
def __init__(self, *args):
self.list = list()
self.extend(list(args))
def _check(self, val):
"""Checks if the value is an instance of :obj:`budou.chunk.Chunk`.
Args:
val (:obj:`budou.chunk.Chunk`): input to check
Raises:
TypeError: If :code:`val` is not an instance of :obj:`budou.chunk.Chunk`.
"""
if not isinstance(val, Chunk):
raise TypeError
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __delitem__(self, i):
del self.list[i]
def __setitem__(self, i, v):
self._check(v)
self.list[i] = v
def insert(self, index, value):
self._check(value)
self.list.insert(index, value)
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
def resolve_dependencies(self):
"""Resolves chunk dependency by concatenating them.
"""
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines()
def _concatenate_inner(self, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
"""
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks
def _insert_breaklines(self):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
"""
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks
def html_serialize(self, attributes, max_length=None):
"""Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result
|
google/budou | budou/chunk.py | ChunkList.swap | python | def swap(self, old_chunks, new_chunk):
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk) | Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L210-L220 | null | class ChunkList(collections.MutableSequence):
"""List of :obj:`budou.chunk.Chunk` with some helpers.
This list accepts only instances of :obj:`budou.chunk.Chunk`.
Example:
.. code-block:: python
from budou.chunk import Chunk, ChunkList
chunks = ChunkList(Chunk('abc'), Chunk('def'))
chunks.append(Chunk('ghi')) # OK
chunks.append('jkl') # NG
Args:
args (list of :obj:`budou.chunk.Chunk`): Initial values included in the
list.
"""
def __init__(self, *args):
self.list = list()
self.extend(list(args))
def _check(self, val):
"""Checks if the value is an instance of :obj:`budou.chunk.Chunk`.
Args:
val (:obj:`budou.chunk.Chunk`): input to check
Raises:
TypeError: If :code:`val` is not an instance of :obj:`budou.chunk.Chunk`.
"""
if not isinstance(val, Chunk):
raise TypeError
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __delitem__(self, i):
del self.list[i]
def __setitem__(self, i, v):
self._check(v)
self.list[i] = v
def insert(self, index, value):
self._check(value)
self.list.insert(index, value)
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
def resolve_dependencies(self):
"""Resolves chunk dependency by concatenating them.
"""
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines()
def _concatenate_inner(self, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
"""
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks
def _insert_breaklines(self):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
"""
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks
def html_serialize(self, attributes, max_length=None):
"""Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result
|
google/budou | budou/chunk.py | ChunkList.resolve_dependencies | python | def resolve_dependencies(self):
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines() | Resolves chunk dependency by concatenating them. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L222-L227 | null | class ChunkList(collections.MutableSequence):
"""List of :obj:`budou.chunk.Chunk` with some helpers.
This list accepts only instances of :obj:`budou.chunk.Chunk`.
Example:
.. code-block:: python
from budou.chunk import Chunk, ChunkList
chunks = ChunkList(Chunk('abc'), Chunk('def'))
chunks.append(Chunk('ghi')) # OK
chunks.append('jkl') # NG
Args:
args (list of :obj:`budou.chunk.Chunk`): Initial values included in the
list.
"""
def __init__(self, *args):
self.list = list()
self.extend(list(args))
def _check(self, val):
"""Checks if the value is an instance of :obj:`budou.chunk.Chunk`.
Args:
val (:obj:`budou.chunk.Chunk`): input to check
Raises:
TypeError: If :code:`val` is not an instance of :obj:`budou.chunk.Chunk`.
"""
if not isinstance(val, Chunk):
raise TypeError
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __delitem__(self, i):
del self.list[i]
def __setitem__(self, i, v):
self._check(v)
self.list[i] = v
def insert(self, index, value):
self._check(value)
self.list.insert(index, value)
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
def _concatenate_inner(self, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
"""
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks
def _insert_breaklines(self):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
"""
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks
def html_serialize(self, attributes, max_length=None):
"""Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result
|
google/budou | budou/chunk.py | ChunkList._concatenate_inner | python | def _concatenate_inner(self, direction):
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks | Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L229-L259 | null | class ChunkList(collections.MutableSequence):
"""List of :obj:`budou.chunk.Chunk` with some helpers.
This list accepts only instances of :obj:`budou.chunk.Chunk`.
Example:
.. code-block:: python
from budou.chunk import Chunk, ChunkList
chunks = ChunkList(Chunk('abc'), Chunk('def'))
chunks.append(Chunk('ghi')) # OK
chunks.append('jkl') # NG
Args:
args (list of :obj:`budou.chunk.Chunk`): Initial values included in the
list.
"""
def __init__(self, *args):
self.list = list()
self.extend(list(args))
def _check(self, val):
"""Checks if the value is an instance of :obj:`budou.chunk.Chunk`.
Args:
val (:obj:`budou.chunk.Chunk`): input to check
Raises:
TypeError: If :code:`val` is not an instance of :obj:`budou.chunk.Chunk`.
"""
if not isinstance(val, Chunk):
raise TypeError
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __delitem__(self, i):
del self.list[i]
def __setitem__(self, i, v):
self._check(v)
self.list[i] = v
def insert(self, index, value):
self._check(value)
self.list.insert(index, value)
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
def resolve_dependencies(self):
"""Resolves chunk dependency by concatenating them.
"""
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines()
def _insert_breaklines(self):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
"""
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks
def html_serialize(self, attributes, max_length=None):
"""Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result
|
google/budou | budou/chunk.py | ChunkList._insert_breaklines | python | def _insert_breaklines(self):
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks | Inserts a breakline instead of a trailing space if the chunk is in CJK. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L261-L272 | null | class ChunkList(collections.MutableSequence):
"""List of :obj:`budou.chunk.Chunk` with some helpers.
This list accepts only instances of :obj:`budou.chunk.Chunk`.
Example:
.. code-block:: python
from budou.chunk import Chunk, ChunkList
chunks = ChunkList(Chunk('abc'), Chunk('def'))
chunks.append(Chunk('ghi')) # OK
chunks.append('jkl') # NG
Args:
args (list of :obj:`budou.chunk.Chunk`): Initial values included in the
list.
"""
def __init__(self, *args):
self.list = list()
self.extend(list(args))
def _check(self, val):
"""Checks if the value is an instance of :obj:`budou.chunk.Chunk`.
Args:
val (:obj:`budou.chunk.Chunk`): input to check
Raises:
TypeError: If :code:`val` is not an instance of :obj:`budou.chunk.Chunk`.
"""
if not isinstance(val, Chunk):
raise TypeError
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __delitem__(self, i):
del self.list[i]
def __setitem__(self, i, v):
self._check(v)
self.list[i] = v
def insert(self, index, value):
self._check(value)
self.list.insert(index, value)
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
def resolve_dependencies(self):
"""Resolves chunk dependency by concatenating them.
"""
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines()
def _concatenate_inner(self, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
"""
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks
def html_serialize(self, attributes, max_length=None):
"""Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result
|
google/budou | budou/chunk.py | ChunkList.html_serialize | python | def html_serialize(self, attributes, max_length=None):
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result | Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str) | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L274-L311 | null | class ChunkList(collections.MutableSequence):
"""List of :obj:`budou.chunk.Chunk` with some helpers.
This list accepts only instances of :obj:`budou.chunk.Chunk`.
Example:
.. code-block:: python
from budou.chunk import Chunk, ChunkList
chunks = ChunkList(Chunk('abc'), Chunk('def'))
chunks.append(Chunk('ghi')) # OK
chunks.append('jkl') # NG
Args:
args (list of :obj:`budou.chunk.Chunk`): Initial values included in the
list.
"""
def __init__(self, *args):
self.list = list()
self.extend(list(args))
def _check(self, val):
"""Checks if the value is an instance of :obj:`budou.chunk.Chunk`.
Args:
val (:obj:`budou.chunk.Chunk`): input to check
Raises:
TypeError: If :code:`val` is not an instance of :obj:`budou.chunk.Chunk`.
"""
if not isinstance(val, Chunk):
raise TypeError
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __delitem__(self, i):
del self.list[i]
def __setitem__(self, i, v):
self._check(v)
self.list[i] = v
def insert(self, index, value):
self._check(value)
self.list.insert(index, value)
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
def resolve_dependencies(self):
"""Resolves chunk dependency by concatenating them.
"""
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines()
def _concatenate_inner(self, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
"""
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks
def _insert_breaklines(self):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
"""
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks
|
def _load_from_file(path):
    """Load a config file from the given path.

    Load all normalizations from the config file received as
    argument. It expects to find a YAML file with a list of
    normalizations and arguments under the key 'normalizations'.

    Args:
        path: Path to YAML file.

    Returns:
        The list of normalizations read from the file.

    Raises:
        ConfigError: If the file cannot be read, has an unexpected
            structure, or contains invalid YAML.
    """
    config = []

    try:
        with open(path, 'r') as config_file:
            # FIX: safe_load avoids arbitrary object construction from
            # untrusted YAML; yaml.load without a Loader is unsafe and
            # deprecated since PyYAML 5.1.
            config = yaml.safe_load(config_file)['normalizations']
    except EnvironmentError as e:
        # FIX: '%' binds tighter than the conditional expression, so the
        # original only applied the prefix in the len(e.args) > 1 branch;
        # parenthesize the conditional so both branches are formatted.
        raise ConfigError('Problem while loading file: %s' %
                          (e.args[1] if len(e.args) > 1 else e))
    except (TypeError, KeyError) as e:
        raise ConfigError('Config file has an unexpected structure: %s' % e)
    except yaml.YAMLError:
        raise ConfigError('Invalid YAML file syntax')

    return config
Load all normalizations from the config file received as
argument. It expects to find a YAML file with a list of
normalizations and arguments under the key 'normalizations'.
Args:
path: Path to YAML file. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/config.py#L53-L76 | null | class Config(object):
"""Class to manage cucco configuration.
This class allows to handle all cucco configuration and is
used by the different modules.
Attributes:
debug: Whether to show debug messages or not.
language: Language to be used for the normalizations.
normalizations: List or path to config file.
verbose: Level of output verbosity.
"""
normalizations = DEFAULT_NORMALIZATIONS
def __init__(self,
normalizations=None,
language='en',
logger=None,
debug=False,
verbose=False):
"""Inits Config class."""
self.debug = debug
self.language = language
self.logger = logger or logging.initialize_logger(debug)
self.verbose = verbose or debug
if normalizations:
if isinstance(normalizations, STR_TYPE):
normalizations = self._load_from_file(normalizations)
self.normalizations = self._parse_normalizations(normalizations)
@staticmethod
@staticmethod
def _parse_normalization(normalization):
"""Parse a normalization item.
Transform dicts into a tuple containing the normalization
options. If a string is found, the actual value is used.
Args:
normalization: Normalization to parse.
Returns:
Tuple or string containing the parsed normalization.
"""
parsed_normalization = None
if isinstance(normalization, dict):
if len(normalization.keys()) == 1:
items = list(normalization.items())[0]
if len(items) == 2: # Two elements tuple
# Convert to string if no normalization options
if items[1] and isinstance(items[1], dict):
parsed_normalization = items
else:
parsed_normalization = items[0]
elif isinstance(normalization, STR_TYPE):
parsed_normalization = normalization
return parsed_normalization
def _parse_normalizations(self, normalizations):
"""Returns a list of parsed normalizations.
Iterates over a list of normalizations, removing those
not correctly defined. It also transform complex items
to have a common format (list of tuples and strings).
Args:
normalizations: List of normalizations to parse.
Returns:
A list of normalizations after being parsed and curated.
"""
parsed_normalizations = []
if isinstance(normalizations, list):
for item in normalizations:
normalization = self._parse_normalization(item)
if normalization:
parsed_normalizations.append(normalization)
else:
raise ConfigError('List expected. Found %s' % type(normalizations))
return parsed_normalizations
|
davidmogar/cucco | cucco/config.py | Config._parse_normalization | python | def _parse_normalization(normalization):
parsed_normalization = None
if isinstance(normalization, dict):
if len(normalization.keys()) == 1:
items = list(normalization.items())[0]
if len(items) == 2: # Two elements tuple
# Convert to string if no normalization options
if items[1] and isinstance(items[1], dict):
parsed_normalization = items
else:
parsed_normalization = items[0]
elif isinstance(normalization, STR_TYPE):
parsed_normalization = normalization
return parsed_normalization | Parse a normalization item.
Transform dicts into a tuple containing the normalization
options. If a string is found, the actual value is used.
Args:
normalization: Normalization to parse.
Returns:
Tuple or string containing the parsed normalization. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/config.py#L79-L105 | null | class Config(object):
"""Class to manage cucco configuration.
This class allows to handle all cucco configuration and is
used by the different modules.
Attributes:
debug: Whether to show debug messages or not.
language: Language to be used for the normalizations.
normalizations: List or path to config file.
verbose: Level of output verbosity.
"""
normalizations = DEFAULT_NORMALIZATIONS
def __init__(self,
normalizations=None,
language='en',
logger=None,
debug=False,
verbose=False):
"""Inits Config class."""
self.debug = debug
self.language = language
self.logger = logger or logging.initialize_logger(debug)
self.verbose = verbose or debug
if normalizations:
if isinstance(normalizations, STR_TYPE):
normalizations = self._load_from_file(normalizations)
self.normalizations = self._parse_normalizations(normalizations)
@staticmethod
def _load_from_file(path):
"""Load a config file from the given path.
Load all normalizations from the config file received as
argument. It expects to find a YAML file with a list of
normalizations and arguments under the key 'normalizations'.
Args:
path: Path to YAML file.
"""
config = []
try:
with open(path, 'r') as config_file:
config = yaml.load(config_file)['normalizations']
except EnvironmentError as e:
raise ConfigError('Problem while loading file: %s' %
e.args[1] if len(e.args) > 1 else e)
except (TypeError, KeyError) as e:
raise ConfigError('Config file has an unexpected structure: %s' % e)
except yaml.YAMLError:
raise ConfigError('Invalid YAML file syntax')
return config
@staticmethod
def _parse_normalizations(self, normalizations):
"""Returns a list of parsed normalizations.
Iterates over a list of normalizations, removing those
not correctly defined. It also transform complex items
to have a common format (list of tuples and strings).
Args:
normalizations: List of normalizations to parse.
Returns:
A list of normalizations after being parsed and curated.
"""
parsed_normalizations = []
if isinstance(normalizations, list):
for item in normalizations:
normalization = self._parse_normalization(item)
if normalization:
parsed_normalizations.append(normalization)
else:
raise ConfigError('List expected. Found %s' % type(normalizations))
return parsed_normalizations
|
davidmogar/cucco | cucco/config.py | Config._parse_normalizations | python | def _parse_normalizations(self, normalizations):
parsed_normalizations = []
if isinstance(normalizations, list):
for item in normalizations:
normalization = self._parse_normalization(item)
if normalization:
parsed_normalizations.append(normalization)
else:
raise ConfigError('List expected. Found %s' % type(normalizations))
return parsed_normalizations | Returns a list of parsed normalizations.
Iterates over a list of normalizations, removing those
not correctly defined. It also transform complex items
to have a common format (list of tuples and strings).
Args:
normalizations: List of normalizations to parse.
Returns:
A list of normalizations after being parsed and curated. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/config.py#L107-L130 | [
"def _parse_normalization(normalization):\n \"\"\"Parse a normalization item.\n\n Transform dicts into a tuple containing the normalization\n options. If a string is found, the actual value is used.\n\n Args:\n normalization: Normalization to parse.\n\n Returns:\n Tuple or string contai... | class Config(object):
"""Class to manage cucco configuration.
This class allows to handle all cucco configuration and is
used by the different modules.
Attributes:
debug: Whether to show debug messages or not.
language: Language to be used for the normalizations.
normalizations: List or path to config file.
verbose: Level of output verbosity.
"""
normalizations = DEFAULT_NORMALIZATIONS
def __init__(self,
normalizations=None,
language='en',
logger=None,
debug=False,
verbose=False):
"""Inits Config class."""
self.debug = debug
self.language = language
self.logger = logger or logging.initialize_logger(debug)
self.verbose = verbose or debug
if normalizations:
if isinstance(normalizations, STR_TYPE):
normalizations = self._load_from_file(normalizations)
self.normalizations = self._parse_normalizations(normalizations)
@staticmethod
def _load_from_file(path):
"""Load a config file from the given path.
Load all normalizations from the config file received as
argument. It expects to find a YAML file with a list of
normalizations and arguments under the key 'normalizations'.
Args:
path: Path to YAML file.
"""
config = []
try:
with open(path, 'r') as config_file:
config = yaml.load(config_file)['normalizations']
except EnvironmentError as e:
raise ConfigError('Problem while loading file: %s' %
e.args[1] if len(e.args) > 1 else e)
except (TypeError, KeyError) as e:
raise ConfigError('Config file has an unexpected structure: %s' % e)
except yaml.YAMLError:
raise ConfigError('Invalid YAML file syntax')
return config
@staticmethod
def _parse_normalization(normalization):
"""Parse a normalization item.
Transform dicts into a tuple containing the normalization
options. If a string is found, the actual value is used.
Args:
normalization: Normalization to parse.
Returns:
Tuple or string containing the parsed normalization.
"""
parsed_normalization = None
if isinstance(normalization, dict):
if len(normalization.keys()) == 1:
items = list(normalization.items())[0]
if len(items) == 2: # Two elements tuple
# Convert to string if no normalization options
if items[1] and isinstance(items[1], dict):
parsed_normalization = items
else:
parsed_normalization = items[0]
elif isinstance(normalization, STR_TYPE):
parsed_normalization = normalization
return parsed_normalization
|
def initialize_logger(debug):
    """Set up the logger to be used by the library.

    Args:
        debug: Whether to use debug level or not.

    Returns:
        A logger ready to be used.
    """
    level = logging.DEBUG if debug else logging.INFO

    logger = logging.getLogger('cucco')
    logger.setLevel(level)

    if logger.handlers:
        # FIX: 'cucco' is a process-wide singleton; the original added a
        # new StreamHandler on every call, duplicating log output. When
        # already initialized, just realign the handler levels.
        for handler in logger.handlers:
            handler.setLevel(level)
    else:
        formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s')

        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)

    return logger
Args:
debug: Whether to use debug level or not.
Returns:
A logger ready to be used. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/logging.py#L5-L23 | null | from __future__ import absolute_import
import logging
|
def batch(ctx, path, recursive, watch):
    """
    Normalize files in a path.

    Apply normalizations over all files found in a given path.
    The normalizations applied will be those defined in the config
    file. If no config is specified, the default normalizations will
    be used.
    """
    # FIX: use a name that does not shadow this command function.
    processor = Batch(ctx.obj['config'], ctx.obj['cucco'])

    if os.path.exists(path):
        if watch:
            processor.watch(path, recursive)
        elif os.path.isfile(path):
            processor.process_file(path)
        else:
            processor.process_files(path, recursive)
    else:
        # FIX: grammar of the user-facing message ("doesn't exists").
        click.echo('Error: Specified path doesn\'t exist', err=True)
        sys.exit(-1)
Apply normalizations over all files found in a given path.
The normalizations applied will be those defined in the config
file. If no config is specified, the default normalizations will
be used. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cli.py#L23-L43 | [
"def watch(self, path, recursive=False):\n \"\"\"Watch for files in a directory and apply normalizations.\n\n Watch for new or changed files in a directory and apply\n normalizations over them.\n\n Args:\n path: Path to the directory.\n recursive: Whether to find files recursively or not.\... | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import click
import os
import sys
import cucco.logging as logging
from cucco.batch import Batch
from cucco.config import Config
from cucco.cucco import Cucco
from cucco.errors import ConfigError
@click.command()
@click.argument('path')
@click.option('--recursive', '-r', is_flag=True,
help='Whether to search for files recursively.')
@click.option('--watch', '-w', is_flag=True,
help='Watch for new files in the given path.')
@click.pass_context
@click.command()
@click.argument('text', required=False)
@click.pass_context
def normalize(ctx, text):
"""
Normalize text or piped input.
Normalize text passed as an argument to this command using
the specified config (default values if --config option is
not used).
Pipes can be used along with this command to process the output
of another cli. This is the default behaviour when no text
is defined.
"""
if text:
click.echo(ctx.obj['cucco'].normalize(text))
else:
for line in sys.stdin:
click.echo(ctx.obj['cucco'].normalize(line))
@click.group()
@click.option('--config', '-c',
help='Path to config file.')
@click.option('--debug', '-d', is_flag=True,
help='Show debug messages.')
@click.option('--language', '-l', default='en',
help='Language to use for the normalization.')
@click.option('--verbose', '-v', is_flag=True,
help='Increase output verbosity.')
@click.version_option()
@click.pass_context
def cli(ctx, config, debug, language, verbose):
"""
Cucco allows to apply normalizations to a given text or file.
This normalizations include, among others, removal of accent
marks, stop words an extra white spaces, replacement of
punctuation symbols, emails, emojis, etc.
For more info on how to use and configure Cucco, check the
project website at https://cucco.io.
"""
ctx.obj = {}
try:
ctx.obj['config'] = Config(normalizations=config,
language=language,
debug=debug,
verbose=verbose)
except ConfigError as e:
click.echo(e.message)
sys.exit(-1)
ctx.obj['cucco'] = Cucco(ctx.obj['config'])
cli.add_command(batch)
cli.add_command(normalize)
|
def normalize(ctx, text):
    """
    Normalize text or piped input.

    Normalize text passed as an argument to this command using
    the specified config (default values if --config option is
    not used).

    Pipes can be used along with this command to process the output
    of another cli. This is the default behaviour when no text
    is defined.
    """
    cucco = ctx.obj['cucco']
    # With no argument, fall back to reading lines from stdin (pipe mode).
    inputs = [text] if text else sys.stdin
    for entry in inputs:
        click.echo(cucco.normalize(entry))
Normalize text passed as an argument to this command using
the specified config (default values if --config option is
not used).
Pipes can be used along with this command to process the output
of another cli. This is the default behaviour when no text
is defined. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cli.py#L48-L64 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import click
import os
import sys
import cucco.logging as logging
from cucco.batch import Batch
from cucco.config import Config
from cucco.cucco import Cucco
from cucco.errors import ConfigError
@click.command()
@click.argument('path')
@click.option('--recursive', '-r', is_flag=True,
help='Whether to search for files recursively.')
@click.option('--watch', '-w', is_flag=True,
help='Watch for new files in the given path.')
@click.pass_context
def batch(ctx, path, recursive, watch):
"""
Normalize files in a path.
Apply normalizations over all files found in a given path.
The normalizations applied will be those defined in the config
file. If no config is specified, the default normalizations will
be used.
"""
batch = Batch(ctx.obj['config'], ctx.obj['cucco'])
if os.path.exists(path):
if watch:
batch.watch(path, recursive)
elif os.path.isfile(path):
batch.process_file(path)
else:
batch.process_files(path, recursive)
else:
click.echo('Error: Specified path doesn\'t exists', err=True)
sys.exit(-1)
@click.command()
@click.argument('text', required=False)
@click.pass_context
@click.group()
@click.option('--config', '-c',
help='Path to config file.')
@click.option('--debug', '-d', is_flag=True,
help='Show debug messages.')
@click.option('--language', '-l', default='en',
help='Language to use for the normalization.')
@click.option('--verbose', '-v', is_flag=True,
help='Increase output verbosity.')
@click.version_option()
@click.pass_context
def cli(ctx, config, debug, language, verbose):
"""
Cucco allows to apply normalizations to a given text or file.
This normalizations include, among others, removal of accent
marks, stop words an extra white spaces, replacement of
punctuation symbols, emails, emojis, etc.
For more info on how to use and configure Cucco, check the
project website at https://cucco.io.
"""
ctx.obj = {}
try:
ctx.obj['config'] = Config(normalizations=config,
language=language,
debug=debug,
verbose=verbose)
except ConfigError as e:
click.echo(e.message)
sys.exit(-1)
ctx.obj['cucco'] = Cucco(ctx.obj['config'])
cli.add_command(batch)
cli.add_command(normalize)
|
def cli(ctx, config, debug, language, verbose):
    """
    Cucco allows applying normalizations to a given text or file.

    These normalizations include, among others, removal of accent
    marks, stop words and extra white spaces, replacement of
    punctuation symbols, emails, emojis, etc.

    For more info on how to use and configure Cucco, check the
    project website at https://cucco.io.
    """
    shared = {}
    ctx.obj = shared

    try:
        shared['config'] = Config(normalizations=config,
                                  language=language,
                                  debug=debug,
                                  verbose=verbose)
    except ConfigError as error:
        # Abort early: without a valid config nothing else can run.
        click.echo(error.message)
        sys.exit(-1)

    shared['cucco'] = Cucco(shared['config'])
These normalizations include, among others, removal of accent
marks, stop words and extra white spaces, replacement of
punctuation symbols, emails, emojis, etc.
For more info on how to use and configure Cucco, check the
project website at https://cucco.io. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cli.py#L77-L98 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import click
import os
import sys
import cucco.logging as logging
from cucco.batch import Batch
from cucco.config import Config
from cucco.cucco import Cucco
from cucco.errors import ConfigError
@click.command()
@click.argument('path')
@click.option('--recursive', '-r', is_flag=True,
help='Whether to search for files recursively.')
@click.option('--watch', '-w', is_flag=True,
help='Watch for new files in the given path.')
@click.pass_context
def batch(ctx, path, recursive, watch):
"""
Normalize files in a path.
Apply normalizations over all files found in a given path.
The normalizations applied will be those defined in the config
file. If no config is specified, the default normalizations will
be used.
"""
batch = Batch(ctx.obj['config'], ctx.obj['cucco'])
if os.path.exists(path):
if watch:
batch.watch(path, recursive)
elif os.path.isfile(path):
batch.process_file(path)
else:
batch.process_files(path, recursive)
else:
click.echo('Error: Specified path doesn\'t exists', err=True)
sys.exit(-1)
@click.command()
@click.argument('text', required=False)
@click.pass_context
def normalize(ctx, text):
"""
Normalize text or piped input.
Normalize text passed as an argument to this command using
the specified config (default values if --config option is
not used).
Pipes can be used along with this command to process the output
of another cli. This is the default behaviour when no text
is defined.
"""
if text:
click.echo(ctx.obj['cucco'].normalize(text))
else:
for line in sys.stdin:
click.echo(ctx.obj['cucco'].normalize(line))
@click.group()
@click.option('--config', '-c',
help='Path to config file.')
@click.option('--debug', '-d', is_flag=True,
help='Show debug messages.')
@click.option('--language', '-l', default='en',
help='Language to use for the normalization.')
@click.option('--verbose', '-v', is_flag=True,
help='Increase output verbosity.')
@click.version_option()
@click.pass_context
cli.add_command(batch)
cli.add_command(normalize)
|
def files_generator(path, recursive):
    """Yield files found in a given path.

    Walk over a given path finding and yielding all files found
    on it. This can be done only on the root directory or
    recursively.

    Args:
        path: Path to the directory.
        recursive: Whether to find files recursively or not.

    Yields:
        A (directory, filename) tuple for each file found, skipping
        files that already carry the batch output extension.
    """
    if recursive:
        for dirpath, _, filenames in os.walk(path):
            for name in filenames:
                if not name.endswith(BATCH_EXTENSION):
                    yield (dirpath, name)
    else:
        for name in os.listdir(path):
            regular_file = os.path.isfile(os.path.join(path, name))
            if regular_file and not name.endswith(BATCH_EXTENSION):
                yield (path, name)
Walk over a given path finding and yielding all
files found on it. This can be done only on the root
directory or recursively.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
Yields:
A tuple for each file in the given path containing
the path and the name of the file. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L11-L35 | null | from __future__ import absolute_import
import os
import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
BATCH_EXTENSION = '.cucco'
def lines_generator(path):
"""Yield lines in a given file.
Iterates over a file's lines, yielding them one by one.
Args:
path: Path to the file.
Yields:
Lines on a given file.
"""
with open(path, 'r') as file:
for line in file:
yield line
class Batch(object):
"""Class to apply normalizations in batch mode.
This class allows applying normalizations over a group
of files. It has two modes. The first one works
over all the files in a directory and the second one
watches for new files in a given path. Both modes generate
new files with the result of the normalizations, leaving
the original files unchanged.
Attributes:
config: Config to use.
cucco: Reference to cucco object.
"""
def __init__(self, config, cucco):
"""Inits Batch class."""
self._config = config
self._cucco = cucco
self._logger = config.logger
self._observer = None
self._watch = False
def process_file(self, path):
"""Process a file applying normalizations.
Get a file as input and generate a new file with the
result of applying normalizations to every single line
in the original file. The extension for the new file
will be the one defined in BATCH_EXTENSION.
Args:
path: Path to the file.
"""
if self._config.verbose:
self._logger.info('Processing file "%s"', path)
output_path = '%s%s' % (path, BATCH_EXTENSION)
with open(output_path, 'w') as file:
for line in lines_generator(path):
file.write('%s\n' % self._cucco.normalize(
line.encode().decode('utf-8')))
self._logger.debug('Created file "%s"', output_path)
def process_files(self, path, recursive=False):
"""Apply normalizations over all files in the given directory.
Iterate over all files in a given directory. Normalizations
will be applied to each file, storing the result in a new file.
The extension for the new file will be the one defined in
BATCH_EXTENSION.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Processing files in "%s"', path)
for (path, file) in files_generator(path, recursive):
if not file.endswith(BATCH_EXTENSION):
self.process_file(os.path.join(path, file))
def stop_watching(self):
"""Stop watching for files.
Stop the observer started by watch function and finish
thread life.
"""
self._watch = False
if self._observer:
self._logger.info('Stopping watcher')
self._observer.stop()
self._logger.info('Watcher stopped')
def watch(self, path, recursive=False):
"""Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Initializing watcher for path "%s"', path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info('Starting watcher')
self._observer.start()
self._watch = True
try:
self._logger.info('Waiting for file events')
while self._watch:
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
self.stop_watching()
self._observer.join()
class FileHandler(FileSystemEventHandler):
"""Handler to use by Batch watcher.
This class is used by Batch's watch mode. The handler will
listen for new and changed files.
Attributes:
batch: Reference to Batch object.
"""
def __init__(self, batch):
"""Inits Batch class."""
self._batch = batch
self._logger = batch._logger
def _process_event(self, event):
"""Process received events.
Process events received, applying normalization for those
events referencing a new or changed file and only if it's
not the result of a previous normalization.
Args:
event: Event to process.
"""
if (not event.is_directory and
not event.src_path.endswith(BATCH_EXTENSION)):
self._logger.info('Detected file change: %s', event.src_path)
self._batch.process_file(event.src_path)
def on_created(self, event):
"""Function called everytime a new file is created.
Args:
event: Event to process.
"""
self._logger.debug('Detected create event on watched path: %s', event.src_path)
self._process_event(event)
def on_modified(self, event):
"""Function called everytime a new file is modified.
Args:
event: Event to process.
"""
self._logger.debug('Detected modify event on watched path: %s', event.src_path)
self._process_event(event)
|
def process_file(self, path):
    """Process a file applying normalizations.

    Get a file as input and generate a new file with the
    result of applying normalizations to every single line
    in the original file. The extension for the new file
    will be the one defined in BATCH_EXTENSION.

    Args:
        path: Path to the file.
    """
    if self._config.verbose:
        self._logger.info('Processing file "%s"', path)

    output_path = '%s%s' % (path, BATCH_EXTENSION)
    # 'output_file' avoids shadowing the builtin name 'file'.
    with open(output_path, 'w') as output_file:
        for line in lines_generator(path):
            # NOTE(review): the previous line.encode().decode('utf-8')
            # round-trip is an identity transform for Python 3 str and
            # was removed — confirm no Python 2 callers remain.
            output_file.write('%s\n' % self._cucco.normalize(line))

    self._logger.debug('Created file "%s"', output_path)
Get a file as input and generate a new file with the
result of applying normalizations to every single line
in the original file. The extension for the new file
will be the one defined in BATCH_EXTENSION.
Args:
path: Path to the file. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L76-L97 | [
"def lines_generator(path):\n \"\"\"Yield lines in a given file.\n\n Iterates over a file lines yielding them.\n\n Args:\n path: Path to the file.\n\n Yields:\n Lines on a given file.\n \"\"\"\n with open(path, 'r') as file:\n for line in file:\n yield line\n"
] | class Batch(object):
"""Class to apply normalizations in batch mode.
This class allows applying normalizations over a group
of files. It has two modes. The first one works
over all the files in a directory and the second one
watches for new files in a given path. Both modes generate
new files with the result of the normalizations, leaving
the original files unchanged.
Attributes:
config: Config to use.
cucco: Reference to cucco object.
"""
def __init__(self, config, cucco):
"""Inits Batch class."""
self._config = config
self._cucco = cucco
self._logger = config.logger
self._observer = None
self._watch = False
def process_files(self, path, recursive=False):
"""Apply normalizations over all files in the given directory.
Iterate over all files in a given directory. Normalizations
will be applied to each file, storing the result in a new file.
The extension for the new file will be the one defined in
BATCH_EXTENSION.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Processing files in "%s"', path)
for (path, file) in files_generator(path, recursive):
if not file.endswith(BATCH_EXTENSION):
self.process_file(os.path.join(path, file))
def stop_watching(self):
"""Stop watching for files.
Stop the observer started by watch function and finish
thread life.
"""
self._watch = False
if self._observer:
self._logger.info('Stopping watcher')
self._observer.stop()
self._logger.info('Watcher stopped')
def watch(self, path, recursive=False):
"""Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Initializing watcher for path "%s"', path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info('Starting watcher')
self._observer.start()
self._watch = True
try:
self._logger.info('Waiting for file events')
while self._watch:
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
self.stop_watching()
self._observer.join()
|
def process_files(self, path, recursive=False):
    """Apply normalizations over all files in the given directory.

    Iterate over all files in a given directory. Normalizations
    will be applied to each file, storing the result in a new file.
    The extension for the new file will be the one defined in
    BATCH_EXTENSION.

    Args:
        path: Path to the directory.
        recursive: Whether to find files recursively or not.
    """
    self._logger.info('Processing files in "%s"', path)

    for directory, filename in files_generator(path, recursive):
        # files_generator already skips batch outputs; the guard is
        # kept as written to stay defensive about batch artifacts.
        if not filename.endswith(BATCH_EXTENSION):
            self.process_file(os.path.join(directory, filename))
Iterate over all files in a given directory. Normalizations
will be applied to each file, storing the result in a new file.
The extension for the new file will be the one defined in
BATCH_EXTENSION.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L99-L115 | [
"def files_generator(path, recursive):\n \"\"\"Yield files found in a given path.\n\n Walk over a given path finding and yielding all\n files found on it. This can be done only on the root\n directory or recursively.\n\n Args:\n path: Path to the directory.\n recursive: Whether to find ... | class Batch(object):
"""Class to apply normalizations in batch mode.
This class permits to apply normalizations over a group
of files. It counts with two modes. The first one works
over all the files in a directory and the second one
watch for new files in a given path. Both modes generate
new files with the result of the normalizations, letting
the original files unchanged.
Attributes:
config: Config to use.
cucco: Reference to cucco object.
"""
def __init__(self, config, cucco):
"""Inits Batch class."""
self._config = config
self._cucco = cucco
self._logger = config.logger
self._observer = None
self._watch = False
def process_file(self, path):
"""Process a file applying normalizations.
Get a file as input and generate a new file with the
result of applying normalizations to every single line
in the original file. The extension for the new file
will be the one defined in BATCH_EXTENSION.
Args:
path: Path to the file.
"""
if self._config.verbose:
self._logger.info('Processing file "%s"', path)
output_path = '%s%s' % (path, BATCH_EXTENSION)
with open(output_path, 'w') as file:
for line in lines_generator(path):
file.write('%s\n' % self._cucco.normalize(
line.encode().decode('utf-8')))
self._logger.debug('Created file "%s"', output_path)
def stop_watching(self):
"""Stop watching for files.
Stop the observer started by watch function and finish
thread life.
"""
self._watch = False
if self._observer:
self._logger.info('Stopping watcher')
self._observer.stop()
self._logger.info('Watcher stopped')
def watch(self, path, recursive=False):
"""Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Initializing watcher for path "%s"', path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info('Starting watcher')
self._observer.start()
self._watch = True
try:
self._logger.info('Waiting for file events')
while self._watch:
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
self.stop_watching()
self._observer.join()
|
davidmogar/cucco | cucco/batch.py | Batch.stop_watching | python | def stop_watching(self):
self._watch = False
if self._observer:
self._logger.info('Stopping watcher')
self._observer.stop()
self._logger.info('Watcher stopped') | Stop watching for files.
Stop the observer started by watch function and finish
thread life. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L117-L128 | null | class Batch(object):
"""Class to apply normalizations in batch mode.
This class permits to apply normalizations over a group
of files. It counts with two modes. The first one works
over all the files in a directory and the second one
watch for new files in a given path. Both modes generate
new files with the result of the normalizations, letting
the original files unchanged.
Attributes:
config: Config to use.
cucco: Reference to cucco object.
"""
def __init__(self, config, cucco):
"""Inits Batch class."""
self._config = config
self._cucco = cucco
self._logger = config.logger
self._observer = None
self._watch = False
def process_file(self, path):
"""Process a file applying normalizations.
Get a file as input and generate a new file with the
result of applying normalizations to every single line
in the original file. The extension for the new file
will be the one defined in BATCH_EXTENSION.
Args:
path: Path to the file.
"""
if self._config.verbose:
self._logger.info('Processing file "%s"', path)
output_path = '%s%s' % (path, BATCH_EXTENSION)
with open(output_path, 'w') as file:
for line in lines_generator(path):
file.write('%s\n' % self._cucco.normalize(
line.encode().decode('utf-8')))
self._logger.debug('Created file "%s"', output_path)
def process_files(self, path, recursive=False):
"""Apply normalizations over all files in the given directory.
Iterate over all files in a given directory. Normalizations
will be applied to each file, storing the result in a new file.
The extension for the new file will be the one defined in
BATCH_EXTENSION.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Processing files in "%s"', path)
for (path, file) in files_generator(path, recursive):
if not file.endswith(BATCH_EXTENSION):
self.process_file(os.path.join(path, file))
def watch(self, path, recursive=False):
"""Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Initializing watcher for path "%s"', path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info('Starting watcher')
self._observer.start()
self._watch = True
try:
self._logger.info('Waiting for file events')
while self._watch:
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
self.stop_watching()
self._observer.join()
|
davidmogar/cucco | cucco/batch.py | Batch.watch | python | def watch(self, path, recursive=False):
self._logger.info('Initializing watcher for path "%s"', path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info('Starting watcher')
self._observer.start()
self._watch = True
try:
self._logger.info('Waiting for file events')
while self._watch:
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
self.stop_watching()
self._observer.join() | Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L130-L157 | [
"def stop_watching(self):\n \"\"\"Stop watching for files.\n\n Stop the observer started by watch function and finish\n thread life.\n \"\"\"\n self._watch = False\n\n if self._observer:\n self._logger.info('Stopping watcher')\n self._observer.stop()\n self._logger.info('Watch... | class Batch(object):
"""Class to apply normalizations in batch mode.
This class permits to apply normalizations over a group
of files. It counts with two modes. The first one works
over all the files in a directory and the second one
watch for new files in a given path. Both modes generate
new files with the result of the normalizations, letting
the original files unchanged.
Attributes:
config: Config to use.
cucco: Reference to cucco object.
"""
def __init__(self, config, cucco):
"""Inits Batch class."""
self._config = config
self._cucco = cucco
self._logger = config.logger
self._observer = None
self._watch = False
def process_file(self, path):
"""Process a file applying normalizations.
Get a file as input and generate a new file with the
result of applying normalizations to every single line
in the original file. The extension for the new file
will be the one defined in BATCH_EXTENSION.
Args:
path: Path to the file.
"""
if self._config.verbose:
self._logger.info('Processing file "%s"', path)
output_path = '%s%s' % (path, BATCH_EXTENSION)
with open(output_path, 'w') as file:
for line in lines_generator(path):
file.write('%s\n' % self._cucco.normalize(
line.encode().decode('utf-8')))
self._logger.debug('Created file "%s"', output_path)
def process_files(self, path, recursive=False):
"""Apply normalizations over all files in the given directory.
Iterate over all files in a given directory. Normalizations
will be applied to each file, storing the result in a new file.
The extension for the new file will be the one defined in
BATCH_EXTENSION.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not.
"""
self._logger.info('Processing files in "%s"', path)
for (path, file) in files_generator(path, recursive):
if not file.endswith(BATCH_EXTENSION):
self.process_file(os.path.join(path, file))
def stop_watching(self):
"""Stop watching for files.
Stop the observer started by watch function and finish
thread life.
"""
self._watch = False
if self._observer:
self._logger.info('Stopping watcher')
self._observer.stop()
self._logger.info('Watcher stopped')
|
davidmogar/cucco | cucco/batch.py | FileHandler._process_event | python | def _process_event(self, event):
if (not event.is_directory and
not event.src_path.endswith(BATCH_EXTENSION)):
self._logger.info('Detected file change: %s', event.src_path)
self._batch.process_file(event.src_path) | Process received events.
Process events received, applying normalization for those
events referencing a new or changed file and only if it's
not the result of a previous normalization.
Args:
event: Event to process. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L175-L188 | null | class FileHandler(FileSystemEventHandler):
"""Handler to use by Batch watcher.
This class is used by Batch's watch mode. The handler will
listen for new and changed files.
Attributes:
batch: Reference to Batch object.
"""
def __init__(self, batch):
"""Inits Batch class."""
self._batch = batch
self._logger = batch._logger
def on_created(self, event):
"""Function called everytime a new file is created.
Args:
event: Event to process.
"""
self._logger.debug('Detected create event on watched path: %s', event.src_path)
self._process_event(event)
def on_modified(self, event):
"""Function called everytime a new file is modified.
Args:
event: Event to process.
"""
self._logger.debug('Detected modify event on watched path: %s', event.src_path)
self._process_event(event)
|
davidmogar/cucco | cucco/batch.py | FileHandler.on_created | python | def on_created(self, event):
self._logger.debug('Detected create event on watched path: %s', event.src_path)
self._process_event(event) | Function called everytime a new file is created.
Args:
event: Event to process. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L190-L198 | null | class FileHandler(FileSystemEventHandler):
"""Handler to use by Batch watcher.
This class is used by Batch's watch mode. The handler will
listen for new and changed files.
Attributes:
batch: Reference to Batch object.
"""
def __init__(self, batch):
"""Inits Batch class."""
self._batch = batch
self._logger = batch._logger
def _process_event(self, event):
"""Process received events.
Process events received, applying normalization for those
events referencing a new or changed file and only if it's
not the result of a previous normalization.
Args:
event: Event to process.
"""
if (not event.is_directory and
not event.src_path.endswith(BATCH_EXTENSION)):
self._logger.info('Detected file change: %s', event.src_path)
self._batch.process_file(event.src_path)
def on_modified(self, event):
"""Function called everytime a new file is modified.
Args:
event: Event to process.
"""
self._logger.debug('Detected modify event on watched path: %s', event.src_path)
self._process_event(event)
|
davidmogar/cucco | cucco/batch.py | FileHandler.on_modified | python | def on_modified(self, event):
self._logger.debug('Detected modify event on watched path: %s', event.src_path)
self._process_event(event) | Function called everytime a new file is modified.
Args:
event: Event to process. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L200-L208 | null | class FileHandler(FileSystemEventHandler):
"""Handler to use by Batch watcher.
This class is used by Batch's watch mode. The handler will
listen for new and changed files.
Attributes:
batch: Reference to Batch object.
"""
def __init__(self, batch):
"""Inits Batch class."""
self._batch = batch
self._logger = batch._logger
def _process_event(self, event):
"""Process received events.
Process events received, applying normalization for those
events referencing a new or changed file and only if it's
not the result of a previous normalization.
Args:
event: Event to process.
"""
if (not event.is_directory and
not event.src_path.endswith(BATCH_EXTENSION)):
self._logger.info('Detected file change: %s', event.src_path)
self._batch.process_file(event.src_path)
def on_created(self, event):
"""Function called everytime a new file is created.
Args:
event: Event to process.
"""
self._logger.debug('Detected create event on watched path: %s', event.src_path)
self._process_event(event)
|
davidmogar/cucco | cucco/cucco.py | Cucco._load_stop_words | python | def _load_stop_words(self, language=None):
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded | Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L39-L62 | [
"def _parse_stop_words_file(self, path):\n \"\"\"Load stop words from the given path.\n\n Parse the stop words file, saving each word found in it in a set\n for the language of the file. This language is obtained from\n the file name. If the file doesn't exist, the method will have\n no effect.\n\n ... | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco._parse_normalizations | python | def _parse_normalizations(normalizations):
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization | Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L65-L82 | null | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco._parse_stop_words_file | python | def _parse_stop_words_file(self, path):
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded | Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L84-L114 | null | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco.normalize | python | def normalize(self, text, normalizations=None):
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text | Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L116-L137 | [
"def _parse_normalizations(normalizations):\n \"\"\"Parse and yield normalizations.\n\n Parse normalizations parameter that yield all normalizations and\n arguments found on it.\n\n Args:\n normalizations: List of normalizations.\n\n Yields:\n A tuple with a parsed normalization. The fi... | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco.remove_accent_marks | python | def remove_accent_marks(text, excluded=None):
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded)) | Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L140-L159 | null | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco.remove_stop_words | python | def remove_stop_words(self, text, ignore_case=True, language=None):
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language]) | Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L177-L200 | [
"def _load_stop_words(self, language=None):\n \"\"\"Load stop words into __stop_words set.\n\n Stop words will be loaded according to the language code\n received during instantiation.\n\n Args:\n language: Language code.\n\n Returns:\n A boolean indicating whether a file was loaded.\n ... | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco.replace_characters | python | def replace_characters(self, text, characters, replacement=''):
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text) | Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L202-L226 | null | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco.replace_punctuation | python | def replace_punctuation(self, text, excluded=None, replacement=''):
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement) | Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L276-L298 | [
"def replace_characters(self, text, characters, replacement=''):\n \"\"\"Remove characters from text.\n\n Removes custom characters from input text or replaces them\n with a string if specified.\n\n Args:\n text: The text to be processed.\n characters: Characters that will be replaced.\n ... | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
@staticmethod
def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
"""Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols.
"""
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text))
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
davidmogar/cucco | cucco/cucco.py | Cucco.replace_symbols | python | def replace_symbols(
text,
form='NFKD',
excluded=None,
replacement=''):
if excluded is None:
excluded = set()
categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
return ''.join(c if unicodedata.category(c) not in categories or c in excluded
else replacement for c in unicodedata.normalize(form, text)) | Replace symbols in text.
Removes symbols from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
form: Unicode form.
excluded: Set of unicode characters to exclude.
replacement: New text that will replace symbols.
Returns:
The text without symbols. | train | https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L301-L326 | null | class Cucco(object):
"""This class offers methods for text normalization.
Attributes:
config: Config to use.
lazy_load: Whether or not to lazy load files.
"""
__punctuation = set(string.punctuation)
def __init__(
self,
config=None,
lazy_load=False):
self._config = config if config else Config()
self._characters_regexes = dict()
self._logger = self._config.logger
self.__stop_words = dict()
# Load stop words
self._load_stop_words(self._config.language if lazy_load else None)
def _load_stop_words(self, language=None):
"""Load stop words into __stop_words set.
Stop words will be loaded according to the language code
received during instantiation.
Args:
language: Language code.
Returns:
A boolean indicating whether a file was loaded.
"""
self._logger.debug('Loading stop words')
loaded = False
if language:
file_path = 'data/stop-' + language
loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))
else:
for file in os.listdir(os.path.join(PATH, 'data')):
loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded
return loaded
@staticmethod
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
def normalize(self, text, normalizations=None):
"""Normalize a given text applying all normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized.
"""
for normalization, kwargs in self._parse_normalizations(
normalizations or self._config.normalizations):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
return text
@staticmethod
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
@staticmethod
def remove_extra_white_spaces(text):
"""Remove extra white spaces from input text.
This function removes white spaces from the beginning and
the end of the string, but also duplicates white spaces
between words.
Args:
text: The text to be processed.
Returns:
The text without extra white spaces.
"""
return ' '.join(text.split())
def remove_stop_words(self, text, ignore_case=True, language=None):
"""Remove stop words.
Stop words are loaded on class instantiation according
to the specified language.
Args:
text: The text to be processed.
ignore_case: Whether or not to ignore case.
language: Code of the language to use (defaults to 'en').
Returns:
The text without stop words.
"""
if not language:
language = self._config.language
if language not in self.__stop_words:
if not self._load_stop_words(language):
self._logger.error('No stop words file for the given language')
return text
return ' '.join(word for word in text.split(' ') if (
word.lower() if ignore_case else word) not in self.__stop_words[language])
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
@staticmethod
def replace_emails(text, replacement=''):
"""Remove emails address from text.
Removes email addresses from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace email addresses.
Returns:
The text without email addresses.
"""
return re.sub(regex.EMAIL_REGEX, replacement, text)
@staticmethod
def replace_emojis(text, replacement=''):
"""Remove emojis from text.
Removes emojis from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace emojis.
Returns:
The text without emojis.
"""
return regex.EMOJI_REGEX.sub(replacement, text)
@staticmethod
def replace_hyphens(text, replacement=' '):
"""Replace hyphens in text.
Replaces hyphens from input text with a whitespace or a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace the hyphens.
Returns:
The text without hyphens.
"""
return text.replace('-', replacement)
def replace_punctuation(self, text, excluded=None, replacement=''):
"""Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
"""
if excluded is None:
excluded = set()
elif not isinstance(excluded, set):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(
text, characters=punct, replacement=replacement)
@staticmethod
@staticmethod
def replace_urls(text, replacement=''):
"""Replace URLs in text.
Removes URLs from input text or replaces them with a
string if specified.
Args:
text: The text to be processed.
replacement: New text that will replace URLs.
Returns:
The text without URLs.
"""
return re.sub(regex.URL_REGEX, replacement, text)
|
tkarabela/pysubs2 | pysubs2/time.py | make_time | python | def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified") | Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000 | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L12-L34 | [
"def frames_to_ms(frames, fps):\n \"\"\"\n Convert frame-based duration to milliseconds.\n\n Arguments:\n frames: Number of frames (should be int).\n fps: Framerate (must be a positive number, eg. 23.976).\n\n Returns:\n Number of milliseconds (rounded to int).\n\n Raises:\n ... | from __future__ import division
from collections import namedtuple
import re
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms))
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps)))
def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps))
def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms)
def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s)
|
tkarabela/pysubs2 | pysubs2/time.py | timestamp_to_ms | python | def timestamp_to_ms(groups):
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms | Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420 | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L36-L50 | null | from __future__ import division
from collections import namedtuple
import re
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified")
def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms))
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps)))
def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps))
def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms)
def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s)
|
tkarabela/pysubs2 | pysubs2/time.py | times_to_ms | python | def times_to_ms(h=0, m=0, s=0, ms=0):
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms)) | Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int). | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L52-L66 | null | from __future__ import division
from collections import namedtuple
import re
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified")
def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms))
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps)))
def ms_to_frames(ms, fps):
    """
    Convert milliseconds to number of frames.

    Arguments:
        ms: Number of milliseconds (may be int, float or other numeric class).
        fps: Framerate (must be a positive number, eg. 23.976).

    Returns:
        Number of frames (int).

    Raises:
        ValueError: fps was negative or zero.
    """
    if fps <= 0:
        raise ValueError("Framerate must be positive number (%f)." % fps)
    seconds = ms / 1000
    return int(round(seconds * fps))
def ms_to_times(ms):
    """
    Convert milliseconds to normalized tuple (h, m, s, ms).

    Arguments:
        ms: Number of milliseconds (may be int, float or other numeric class).
            Should be non-negative.

    Returns:
        Named tuple (h, m, s, ms) of ints.
        Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
    """
    remainder = int(round(ms))
    hours, remainder = divmod(remainder, 3600000)
    minutes, remainder = divmod(remainder, 60000)
    seconds, millis = divmod(remainder, 1000)
    return Times(hours, minutes, seconds, millis)
def ms_to_str(ms, fractions=False):
    """
    Prettyprint milliseconds to [-]H:MM:SS[.mmm]

    Handles huge and/or negative times. Non-negative times with ``fractions=True``
    are matched by :data:`pysubs2.time.TIMESTAMP`.

    Arguments:
        ms: Number of milliseconds (int, float or other numeric class).
        fractions: Whether to print up to millisecond precision.

    Returns:
        str
    """
    sign = "-" if ms < 0 else ""
    h, m, s, ms = ms_to_times(abs(ms))
    if fractions:
        body = "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
    else:
        body = "{:01d}:{:02d}:{:02d}".format(h, m, s)
    return sign + body
|
tkarabela/pysubs2 | pysubs2/time.py | frames_to_ms | python | def frames_to_ms(frames, fps):
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps))) | Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L68-L86 | null | from __future__ import division
from collections import namedtuple
import re
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified")
def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms))
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps)))
def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps))
def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms)
def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s)
|
tkarabela/pysubs2 | pysubs2/time.py | ms_to_frames | python | def ms_to_frames(ms, fps):
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps)) | Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L88-L106 | null | from __future__ import division
from collections import namedtuple
import re
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified")
def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms))
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps)))
def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps))
def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms)
def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s)
|
tkarabela/pysubs2 | pysubs2/time.py | ms_to_times | python | def ms_to_times(ms):
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms) | Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)`` | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L108-L125 | null | from __future__ import division
from collections import namedtuple
import re
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified")
def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms))
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps)))
def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps))
def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms)
def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s)
|
tkarabela/pysubs2 | pysubs2/time.py | ms_to_str | python | def ms_to_str(ms, fractions=False):
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s) | Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L127-L147 | [
"def ms_to_times(ms):\n \"\"\"\n Convert milliseconds to normalized tuple (h, m, s, ms).\n\n Arguments:\n ms: Number of milliseconds (may be int, float or other numeric class).\n Should be non-negative.\n\n Returns:\n Named tuple (h, m, s, ms) of ints.\n Invariants: ``ms ... | from __future__ import division
from collections import namedtuple
import re
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified")
def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms))
def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps)))
def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps))
def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms)
def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s)
|
tkarabela/pysubs2 | pysubs2/substation.py | ms_to_timestamp | python | def ms_to_timestamp(ms):
"""Convert ms to 'H:MM:SS.cc'"""
# XXX throw on overflow/underflow?
if ms < 0: ms = 0
if ms > MAX_REPRESENTABLE_TIME: ms = MAX_REPRESENTABLE_TIME
h, m, s, ms = ms_to_times(ms)
return "%01d:%02d:%02d.%02d" % (h, m, s, ms//10) | Convert ms to 'H:MM:SS.cc | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/substation.py#L49-L55 | [
"def ms_to_times(ms):\n \"\"\"\n Convert milliseconds to normalized tuple (h, m, s, ms).\n\n Arguments:\n ms: Number of milliseconds (may be int, float or other numeric class).\n Should be non-negative.\n\n Returns:\n Named tuple (h, m, s, ms) of ints.\n Invariants: ``ms ... | from __future__ import print_function, division, unicode_literals
import re
from numbers import Number
from .formatbase import FormatBase
from .ssaevent import SSAEvent
from .ssastyle import SSAStyle
from .common import text_type, Color, PY3, binary_string_type
from .time import make_time, ms_to_times, timestamp_to_ms, TIMESTAMP
SSA_ALIGNMENT = (1, 2, 3, 9, 10, 11, 5, 6, 7)
def ass_to_ssa_alignment(i):
    # ASS uses numpad-style alignment codes 1-9; translate to the legacy
    # SSA code via the shared lookup table.
    return SSA_ALIGNMENT[i - 1]
def ssa_to_ass_alignment(i):
    # Inverse mapping: the code's position in the table (1-based) is the
    # numpad-style ASS alignment.
    return 1 + SSA_ALIGNMENT.index(i)
SECTION_HEADING = re.compile(r"^.{,3}\[[^\]]+\]") # allow for UTF-8 BOM, which is 3 bytes
STYLE_FORMAT_LINE = {
"ass": "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic,"
" Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment,"
" MarginL, MarginR, MarginV, Encoding",
"ssa": "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, TertiaryColour, BackColour, Bold, Italic,"
" BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding"
}
STYLE_FIELDS = {
"ass": ["fontname", "fontsize", "primarycolor", "secondarycolor", "outlinecolor", "backcolor", "bold", "italic",
"underline", "strikeout", "scalex", "scaley", "spacing", "angle", "borderstyle", "outline", "shadow",
"alignment", "marginl", "marginr", "marginv", "encoding"],
"ssa": ["fontname", "fontsize", "primarycolor", "secondarycolor", "tertiarycolor", "backcolor", "bold", "italic",
"borderstyle", "outline", "shadow", "alignment", "marginl", "marginr", "marginv", "alphalevel", "encoding"]
}
EVENT_FORMAT_LINE = {
"ass": "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text",
"ssa": "Format: Marked, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text"
}
EVENT_FIELDS = {
"ass": ["layer", "start", "end", "style", "name", "marginl", "marginr", "marginv", "effect", "text"],
"ssa": ["marked", "start", "end", "style", "name", "marginl", "marginr", "marginv", "effect", "text"]
}
#: Largest timestamp allowed in SubStation, ie. 9:59:59.99.
MAX_REPRESENTABLE_TIME = make_time(h=10) - 10
def color_to_ass_rgba(c):
    # Pack the channels as AABBGGRR (alpha in the top byte) and render
    # in the ASS "&H........" hexadecimal notation.
    packed = (c.a << 24) | (c.b << 16) | (c.g << 8) | c.r
    return "&H%08X" % packed
def color_to_ssa_rgb(c):
    # SSA stores colors as a decimal BBGGRR integer (no alpha channel).
    packed = (c.b << 16) | (c.g << 8) | c.r
    return "%d" % packed
def ass_rgba_to_color(s):
    # Strip the "&H" prefix and unpack the AABBGGRR byte order.
    value = int(s[2:], base=16)
    red = value & 0xff
    green = (value >> 8) & 0xff
    blue = (value >> 16) & 0xff
    alpha = (value >> 24) & 0xff
    return Color(red, green, blue, alpha)
def ssa_rgb_to_color(s):
    # Decimal BBGGRR integer; alpha is left at Color's default.
    value = int(s)
    red = value & 0xff
    green = (value >> 8) & 0xff
    blue = (value >> 16) & 0xff
    return Color(red, green, blue)
def is_valid_field_content(s):
    """
    Returns True if string s can be stored in a SubStation field.

    Fields are written in CSV-like manner, thus commas and/or newlines
    are not acceptable in the string.
    """
    return not ("\n" in s or "," in s)
def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={}):
    """
    Split text into fragments with computed SSAStyles.
    Returns list of tuples (fragment, style), where fragment is a part of text
    between two brace-delimited override sequences, and style is the computed
    styling of the fragment, ie. the original style modified by all override
    sequences before the fragment.
    Newline and non-breakable space overrides are left as-is.
    Supported override tags:
    - i, b, u, s
    - r (with or without style name)
    """
    # NOTE(review): ``styles={}`` is a mutable default argument; it is only
    # read here, never mutated, so it is harmless in practice.
    fragments = SSAEvent.OVERRIDE_SEQUENCE.split(text)
    if len(fragments) == 1:
        # No override sequences at all -- the whole text uses the line style.
        return [(text, style)]
    def apply_overrides(all_overrides):
        # Compute the effective style after applying every supported tag
        # found in the concatenation of all preceding override sequences.
        s = style.copy()
        for tag in re.findall(r"\\[ibus][10]|\\r[a-zA-Z_0-9 ]*", all_overrides):
            if tag == r"\r":
                s = style.copy() # reset to original line style
            elif tag.startswith(r"\r"):
                name = tag[2:]
                if name in styles:
                    s = styles[name].copy() # reset to named style
            else:
                # Toggle tags: the trailing digit decides on (1) vs off (0).
                if "i" in tag: s.italic = "1" in tag
                elif "b" in tag: s.bold = "1" in tag
                elif "u" in tag: s.underline = "1" in tag
                elif "s" in tag: s.strikeout = "1" in tag
        return s
    overrides = SSAEvent.OVERRIDE_SEQUENCE.findall(text)
    # Prefix sums of override sequences: element i holds every override that
    # precedes fragment i, so each fragment's style is computed independently.
    overrides_prefix_sum = ["".join(overrides[:i]) for i in range(len(overrides) + 1)]
    computed_styles = map(apply_overrides, overrides_prefix_sum)
    return list(zip(fragments, computed_styles))
NOTICE = "Script generated by pysubs2\nhttps://pypi.python.org/pypi/pysubs2"
class SubstationFormat(FormatBase):
    """Reader/writer for the SubStation Alpha subtitle formats ("ssa"/"ass")."""
    @classmethod
    def guess_format(cls, text):
        # The styles section heading differs between the two format revisions.
        # NOTE(review): implicitly returns None when neither heading is found.
        if "V4+ Styles" in text:
            return "ass"
        elif "V4 Styles" in text:
            return "ssa"
    @classmethod
    def from_file(cls, subs, fp, format_, **kwargs):
        """Parse SubStation data from text file object ``fp`` into ``subs``."""
        def string_to_field(f, v):
            # Convert raw CSV field value ``v`` into the Python value
            # appropriate for field name ``f``.
            if f in {"start", "end"}:
                return timestamp_to_ms(TIMESTAMP.match(v).groups())
            elif "color" in f:
                if format_ == "ass":
                    return ass_rgba_to_color(v)
                else:
                    return ssa_rgb_to_color(v)
            elif f in {"bold", "underline", "italic", "strikeout"}:
                return v == "-1"
            elif f in {"borderstyle", "encoding", "marginl", "marginr", "marginv", "layer", "alphalevel"}:
                return int(v)
            elif f in {"fontsize", "scalex", "scaley", "spacing", "angle", "outline", "shadow"}:
                return float(v)
            elif f == "marked":
                return v.endswith("1")
            elif f == "alignment":
                i = int(v)
                if format_ == "ass":
                    return i
                else:
                    # Legacy SSA alignment codes are normalized to ASS ones.
                    return ssa_to_ass_alignment(i)
            else:
                return v
        # Drop any previously loaded metadata and styles before re-reading.
        subs.info.clear()
        subs.aegisub_project.clear()
        subs.styles.clear()
        inside_info_section = False
        inside_aegisub_section = False
        for line in fp:
            line = line.strip()
            if SECTION_HEADING.match(line):
                inside_info_section = "Info" in line
                inside_aegisub_section = "Aegisub" in line
            elif inside_info_section or inside_aegisub_section:
                if line.startswith(";"): continue # skip comments
                try:
                    k, v = line.split(": ", 1)
                    if inside_info_section:
                        subs.info[k] = v
                    elif inside_aegisub_section:
                        subs.aegisub_project[k] = v
                except ValueError:
                    # Line without a "key: value" separator -- ignore it.
                    pass
            elif line.startswith("Style:"):
                _, rest = line.split(": ", 1)
                buf = rest.strip().split(",")
                name, raw_fields = buf[0], buf[1:] # splat workaround for Python 2.7
                field_dict = {f: string_to_field(f, v) for f, v in zip(STYLE_FIELDS[format_], raw_fields)}
                sty = SSAStyle(**field_dict)
                subs.styles[name] = sty
            elif line.startswith("Dialogue:") or line.startswith("Comment:"):
                ev_type, rest = line.split(": ", 1)
                # The final field (Text) may itself contain commas, hence maxsplit.
                raw_fields = rest.strip().split(",", len(EVENT_FIELDS[format_])-1)
                field_dict = {f: string_to_field(f, v) for f, v in zip(EVENT_FIELDS[format_], raw_fields)}
                field_dict["type"] = ev_type
                ev = SSAEvent(**field_dict)
                subs.events.append(ev)
    @classmethod
    def to_file(cls, subs, fp, format_, header_notice=NOTICE, **kwargs):
        """Write ``subs`` to text file object ``fp`` in the given format."""
        print("[Script Info]", file=fp)
        for line in header_notice.splitlines(False):
            print(";", line, file=fp)
        subs.info["ScriptType"] = "v4.00+" if format_ == "ass" else "v4.00"
        for k, v in subs.info.items():
            print(k, v, sep=": ", file=fp)
        if subs.aegisub_project:
            print("\n[Aegisub Project Garbage]", file=fp)
        for k, v in subs.aegisub_project.items():
            print(k, v, sep=": ", file=fp)
        def field_to_string(f, v, line):
            # Serialize Python value ``v`` of field ``f`` back to its SubStation
            # text form; ``line`` is only used in error messages.
            if f in {"start", "end"}:
                return ms_to_timestamp(v)
            elif f == "marked":
                return "Marked=%d" % v
            elif f == "alignment" and format_ == "ssa":
                return text_type(ass_to_ssa_alignment(v))
            elif isinstance(v, bool):
                return "-1" if v else "0"
            elif isinstance(v, (text_type, Number)):
                return text_type(v)
            elif not PY3 and isinstance(v, binary_string_type):
                # A convenience feature, see issue #12 - accept non-unicode strings
                # when they are ASCII; this is useful in Python 2, especially for non-text
                # fields like style names, where requiring Unicode type seems too stringent
                if all(ord(c) < 128 for c in v):
                    return text_type(v)
                else:
                    raise TypeError("Encountered binary string with non-ASCII codepoint in SubStation field {!r} for line {!r} - please use unicode string instead of str".format(f, line))
            elif isinstance(v, Color):
                if format_ == "ass":
                    return color_to_ass_rgba(v)
                else:
                    return color_to_ssa_rgb(v)
            else:
                raise TypeError("Unexpected type when writing a SubStation field {!r} for line {!r}".format(f, line))
        print("\n[V4+ Styles]" if format_ == "ass" else "\n[V4 Styles]", file=fp)
        print(STYLE_FORMAT_LINE[format_], file=fp)
        for name, sty in subs.styles.items():
            fields = [field_to_string(f, getattr(sty, f), sty) for f in STYLE_FIELDS[format_]]
            print("Style: %s" % name, *fields, sep=",", file=fp)
        print("\n[Events]", file=fp)
        print(EVENT_FORMAT_LINE[format_], file=fp)
        for ev in subs.events:
            fields = [field_to_string(f, getattr(ev, f), ev) for f in EVENT_FIELDS[format_]]
            print(ev.type, end=": ", file=fp)
            print(*fields, sep=",", file=fp)
|
tkarabela/pysubs2 | pysubs2/substation.py | parse_tags | python | def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={}):
fragments = SSAEvent.OVERRIDE_SEQUENCE.split(text)
if len(fragments) == 1:
return [(text, style)]
def apply_overrides(all_overrides):
s = style.copy()
for tag in re.findall(r"\\[ibus][10]|\\r[a-zA-Z_0-9 ]*", all_overrides):
if tag == r"\r":
s = style.copy() # reset to original line style
elif tag.startswith(r"\r"):
name = tag[2:]
if name in styles:
s = styles[name].copy() # reset to named style
else:
if "i" in tag: s.italic = "1" in tag
elif "b" in tag: s.bold = "1" in tag
elif "u" in tag: s.underline = "1" in tag
elif "s" in tag: s.strikeout = "1" in tag
return s
overrides = SSAEvent.OVERRIDE_SEQUENCE.findall(text)
overrides_prefix_sum = ["".join(overrides[:i]) for i in range(len(overrides) + 1)]
computed_styles = map(apply_overrides, overrides_prefix_sum)
return list(zip(fragments, computed_styles)) | Split text into fragments with computed SSAStyles.
Returns list of tuples (fragment, style), where fragment is a part of text
between two brace-delimited override sequences, and style is the computed
styling of the fragment, ie. the original style modified by all override
sequences before the fragment.
Newline and non-breakable space overrides are left as-is.
Supported override tags:
- i, b, u, s
- r (with or without style name) | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/substation.py#L89-L130 | null | from __future__ import print_function, division, unicode_literals
import re
from numbers import Number
from .formatbase import FormatBase
from .ssaevent import SSAEvent
from .ssastyle import SSAStyle
from .common import text_type, Color, PY3, binary_string_type
from .time import make_time, ms_to_times, timestamp_to_ms, TIMESTAMP
SSA_ALIGNMENT = (1, 2, 3, 9, 10, 11, 5, 6, 7)
def ass_to_ssa_alignment(i):
return SSA_ALIGNMENT[i-1]
def ssa_to_ass_alignment(i):
return SSA_ALIGNMENT.index(i) + 1
SECTION_HEADING = re.compile(r"^.{,3}\[[^\]]+\]") # allow for UTF-8 BOM, which is 3 bytes
STYLE_FORMAT_LINE = {
"ass": "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic,"
" Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment,"
" MarginL, MarginR, MarginV, Encoding",
"ssa": "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, TertiaryColour, BackColour, Bold, Italic,"
" BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding"
}
STYLE_FIELDS = {
"ass": ["fontname", "fontsize", "primarycolor", "secondarycolor", "outlinecolor", "backcolor", "bold", "italic",
"underline", "strikeout", "scalex", "scaley", "spacing", "angle", "borderstyle", "outline", "shadow",
"alignment", "marginl", "marginr", "marginv", "encoding"],
"ssa": ["fontname", "fontsize", "primarycolor", "secondarycolor", "tertiarycolor", "backcolor", "bold", "italic",
"borderstyle", "outline", "shadow", "alignment", "marginl", "marginr", "marginv", "alphalevel", "encoding"]
}
EVENT_FORMAT_LINE = {
"ass": "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text",
"ssa": "Format: Marked, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text"
}
EVENT_FIELDS = {
"ass": ["layer", "start", "end", "style", "name", "marginl", "marginr", "marginv", "effect", "text"],
"ssa": ["marked", "start", "end", "style", "name", "marginl", "marginr", "marginv", "effect", "text"]
}
#: Largest timestamp allowed in SubStation, ie. 9:59:59.99.
MAX_REPRESENTABLE_TIME = make_time(h=10) - 10
def ms_to_timestamp(ms):
    """Convert ms to 'H:MM:SS.cc'"""
    # XXX throw on overflow/underflow?
    # Clamp out-of-range values into [0, MAX_REPRESENTABLE_TIME] rather than failing.
    clamped = min(max(ms, 0), MAX_REPRESENTABLE_TIME)
    h, m, s, ms = ms_to_times(clamped)
    # SubStation timestamps carry centisecond precision, hence ms // 10.
    return "%01d:%02d:%02d.%02d" % (h, m, s, ms // 10)
def color_to_ass_rgba(c):
return "&H%08X" % ((c.a << 24) | (c.b << 16) | (c.g << 8) | c.r)
def color_to_ssa_rgb(c):
return "%d" % ((c.b << 16) | (c.g << 8) | c.r)
def ass_rgba_to_color(s):
x = int(s[2:], base=16)
r = x & 0xff
g = (x >> 8) & 0xff
b = (x >> 16) & 0xff
a = (x >> 24) & 0xff
return Color(r, g, b, a)
def ssa_rgb_to_color(s):
x = int(s)
r = x & 0xff
g = (x >> 8) & 0xff
b = (x >> 16) & 0xff
return Color(r, g, b)
def is_valid_field_content(s):
"""
Returns True if string s can be stored in a SubStation field.
Fields are written in CSV-like manner, thus commas and/or newlines
are not acceptable in the string.
"""
return "\n" not in s and "," not in s
def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={}):
"""
Split text into fragments with computed SSAStyles.
Returns list of tuples (fragment, style), where fragment is a part of text
between two brace-delimited override sequences, and style is the computed
styling of the fragment, ie. the original style modified by all override
sequences before the fragment.
Newline and non-breakable space overrides are left as-is.
Supported override tags:
- i, b, u, s
- r (with or without style name)
"""
fragments = SSAEvent.OVERRIDE_SEQUENCE.split(text)
if len(fragments) == 1:
return [(text, style)]
def apply_overrides(all_overrides):
s = style.copy()
for tag in re.findall(r"\\[ibus][10]|\\r[a-zA-Z_0-9 ]*", all_overrides):
if tag == r"\r":
s = style.copy() # reset to original line style
elif tag.startswith(r"\r"):
name = tag[2:]
if name in styles:
s = styles[name].copy() # reset to named style
else:
if "i" in tag: s.italic = "1" in tag
elif "b" in tag: s.bold = "1" in tag
elif "u" in tag: s.underline = "1" in tag
elif "s" in tag: s.strikeout = "1" in tag
return s
overrides = SSAEvent.OVERRIDE_SEQUENCE.findall(text)
overrides_prefix_sum = ["".join(overrides[:i]) for i in range(len(overrides) + 1)]
computed_styles = map(apply_overrides, overrides_prefix_sum)
return list(zip(fragments, computed_styles))
NOTICE = "Script generated by pysubs2\nhttps://pypi.python.org/pypi/pysubs2"
class SubstationFormat(FormatBase):
    """Reader/writer for the SubStation Alpha formats ("ass" and "ssa")."""

    @classmethod
    def guess_format(cls, text):
        # Detect the SubStation variant from the styles section heading;
        # implicitly returns None for non-SubStation input.
        if "V4+ Styles" in text:
            return "ass"
        elif "V4 Styles" in text:
            return "ssa"

    @classmethod
    def from_file(cls, subs, fp, format_, **kwargs):
        """Parse a SubStation file object *fp* into *subs* (an SSAFile)."""

        def string_to_field(f, v):
            # Convert raw CSV field text *v* into the native value for field *f*.
            if f in {"start", "end"}:
                return timestamp_to_ms(TIMESTAMP.match(v).groups())
            elif "color" in f:
                if format_ == "ass":
                    return ass_rgba_to_color(v)
                else:
                    return ssa_rgb_to_color(v)
            elif f in {"bold", "underline", "italic", "strikeout"}:
                # SubStation encodes booleans as "-1" (true) / "0" (false).
                return v == "-1"
            elif f in {"borderstyle", "encoding", "marginl", "marginr", "marginv", "layer", "alphalevel"}:
                return int(v)
            elif f in {"fontsize", "scalex", "scaley", "spacing", "angle", "outline", "shadow"}:
                return float(v)
            elif f == "marked":
                # SSA-only field formatted as "Marked=N".
                return v.endswith("1")
            elif f == "alignment":
                i = int(v)
                if format_ == "ass":
                    return i
                else:
                    # SSA uses a different alignment numbering scheme than ASS.
                    return ssa_to_ass_alignment(i)
            else:
                return v

        # Start from a clean slate; parsing replaces any prior content.
        subs.info.clear()
        subs.aegisub_project.clear()
        subs.styles.clear()

        inside_info_section = False
        inside_aegisub_section = False

        for line in fp:
            line = line.strip()

            if SECTION_HEADING.match(line):
                inside_info_section = "Info" in line
                inside_aegisub_section = "Aegisub" in line
            elif inside_info_section or inside_aegisub_section:
                if line.startswith(";"): continue # skip comments
                try:
                    k, v = line.split(": ", 1)
                    if inside_info_section:
                        subs.info[k] = v
                    elif inside_aegisub_section:
                        subs.aegisub_project[k] = v
                except ValueError:
                    # Malformed key/value line -- silently ignored.
                    pass
            elif line.startswith("Style:"):
                _, rest = line.split(": ", 1)
                buf = rest.strip().split(",")
                name, raw_fields = buf[0], buf[1:] # splat workaround for Python 2.7
                field_dict = {f: string_to_field(f, v) for f, v in zip(STYLE_FIELDS[format_], raw_fields)}
                sty = SSAStyle(**field_dict)
                subs.styles[name] = sty
            elif line.startswith("Dialogue:") or line.startswith("Comment:"):
                ev_type, rest = line.split(": ", 1)
                # Limit the split so commas inside the Text field survive.
                raw_fields = rest.strip().split(",", len(EVENT_FIELDS[format_])-1)
                field_dict = {f: string_to_field(f, v) for f, v in zip(EVENT_FIELDS[format_], raw_fields)}
                field_dict["type"] = ev_type
                ev = SSAEvent(**field_dict)
                subs.events.append(ev)

    @classmethod
    def to_file(cls, subs, fp, format_, header_notice=NOTICE, **kwargs):
        """Write *subs* (an SSAFile) to file object *fp* in the given format."""
        print("[Script Info]", file=fp)
        for line in header_notice.splitlines(False):
            print(";", line, file=fp)

        subs.info["ScriptType"] = "v4.00+" if format_ == "ass" else "v4.00"

        for k, v in subs.info.items():
            print(k, v, sep=": ", file=fp)

        if subs.aegisub_project:
            print("\n[Aegisub Project Garbage]", file=fp)
            for k, v in subs.aegisub_project.items():
                print(k, v, sep=": ", file=fp)

        def field_to_string(f, v, line):
            # Serialize native value *v* of field *f* into its CSV text form;
            # *line* is the owning style/event, used only in error messages.
            if f in {"start", "end"}:
                return ms_to_timestamp(v)
            elif f == "marked":
                return "Marked=%d" % v
            elif f == "alignment" and format_ == "ssa":
                return text_type(ass_to_ssa_alignment(v))
            elif isinstance(v, bool):
                return "-1" if v else "0"
            elif isinstance(v, (text_type, Number)):
                return text_type(v)
            elif not PY3 and isinstance(v, binary_string_type):
                # A convenience feature, see issue #12 - accept non-unicode strings
                # when they are ASCII; this is useful in Python 2, especially for non-text
                # fields like style names, where requiring Unicode type seems too stringent
                if all(ord(c) < 128 for c in v):
                    return text_type(v)
                else:
                    raise TypeError("Encountered binary string with non-ASCII codepoint in SubStation field {!r} for line {!r} - please use unicode string instead of str".format(f, line))
            elif isinstance(v, Color):
                if format_ == "ass":
                    return color_to_ass_rgba(v)
                else:
                    return color_to_ssa_rgb(v)
            else:
                raise TypeError("Unexpected type when writing a SubStation field {!r} for line {!r}".format(f, line))

        print("\n[V4+ Styles]" if format_ == "ass" else "\n[V4 Styles]", file=fp)
        print(STYLE_FORMAT_LINE[format_], file=fp)
        for name, sty in subs.styles.items():
            fields = [field_to_string(f, getattr(sty, f), sty) for f in STYLE_FIELDS[format_]]
            print("Style: %s" % name, *fields, sep=",", file=fp)

        print("\n[Events]", file=fp)
        print(EVENT_FORMAT_LINE[format_], file=fp)
        for ev in subs.events:
            fields = [field_to_string(f, getattr(ev, f), ev) for f in EVENT_FIELDS[format_]]
            print(ev.type, end=": ", file=fp)
            print(*fields, sep=",", file=fp)
|
tkarabela/pysubs2 | pysubs2/ssaevent.py | SSAEvent.plaintext | python | def plaintext(self):
text = self.text
text = self.OVERRIDE_SEQUENCE.sub("", text)
text = text.replace(r"\h", " ")
text = text.replace(r"\n", "\n")
text = text.replace(r"\N", "\n")
return text | Subtitle text as multi-line string with no tags (read/write property).
Writing to this property replaces :attr:`SSAEvent.text` with given plain
text. Newlines are converted to ``\\N`` tags. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssaevent.py#L87-L99 | null | class SSAEvent(object):
"""
A SubStation Event, ie. one subtitle.
In SubStation, each subtitle consists of multiple "fields" like Start, End and Text.
These are exposed as attributes (note that they are lowercase; see :attr:`SSAEvent.FIELDS` for a list).
Additionally, there are some convenience properties like :attr:`SSAEvent.plaintext` or :attr:`SSAEvent.duration`.
This class defines an ordering with respect to (start, end) timestamps.
.. tip :: Use :func:`pysubs2.make_time()` to get times in milliseconds.
Example::
>>> ev = SSAEvent(start=make_time(s=1), end=make_time(s=2.5), text="Hello World!")
"""
OVERRIDE_SEQUENCE = re.compile(r"{[^}]*}")
#: All fields in SSAEvent.
FIELDS = frozenset([
"start", "end", "text", "marked", "layer", "style",
"name", "marginl", "marginr", "marginv", "effect", "type"
])
def __init__(self, **fields):
self.start = 0 #: Subtitle start time (in milliseconds)
self.end = 10000 #: Subtitle end time (in milliseconds)
self.text = "" #: Text of subtitle (with SubStation override tags)
self.marked = False #: (SSA only)
self.layer = 0 #: Layer number, 0 is the lowest layer (ASS only)
self.style = "Default" #: Style name
self.name = "" #: Actor name
self.marginl = 0 #: Left margin
self.marginr = 0 #: Right margin
self.marginv = 0 #: Vertical margin
self.effect = "" #: Line effect
self.type = "Dialogue" #: Line type (Dialogue/Comment)
for k, v in fields.items():
if k in self.FIELDS:
setattr(self, k, v)
else:
raise ValueError("SSAEvent has no field named %r" % k)
@property
def duration(self):
"""
Subtitle duration in milliseconds (read/write property).
Writing to this property adjusts :attr:`SSAEvent.end`.
Setting negative durations raises :exc:`ValueError`.
"""
return self.end - self.start
@duration.setter
def duration(self, ms):
if ms >= 0:
self.end = self.start + ms
else:
raise ValueError("Subtitle duration cannot be negative")
@property
def is_comment(self):
"""
When true, the subtitle is a comment, ie. not visible (read/write property).
Setting this property is equivalent to changing
:attr:`SSAEvent.type` to ``"Dialogue"`` or ``"Comment"``.
"""
return self.type == "Comment"
@is_comment.setter
def is_comment(self, value):
if value:
self.type = "Comment"
else:
self.type = "Dialogue"
@property
@plaintext.setter
def plaintext(self, text):
self.text = text.replace("\n", r"\N")
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift start and end times.
See :meth:`SSAFile.shift()` for full description.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
self.start += delta
self.end += delta
def copy(self):
"""Return a copy of the SSAEvent."""
return SSAEvent(**self.as_dict())
def as_dict(self):
return {field: getattr(self, field) for field in self.FIELDS}
def equals(self, other):
"""Field-based equality for SSAEvents."""
if isinstance(other, SSAEvent):
return self.as_dict() == other.as_dict()
else:
raise TypeError("Cannot compare to non-SSAEvent object")
def __eq__(self, other):
# XXX document this
return self.start == other.start and self.end == other.end
def __ne__(self, other):
return self.start != other.start or self.end != other.end
def __lt__(self, other):
return (self.start, self.end) < (other.start, other.end)
def __le__(self, other):
return (self.start, self.end) <= (other.start, other.end)
def __gt__(self, other):
return (self.start, self.end) > (other.start, other.end)
def __ge__(self, other):
return (self.start, self.end) >= (other.start, other.end)
def __repr__(self):
s = "<SSAEvent type={self.type} start={start} end={end} text='{self.text}'>".format(
self=self, start=ms_to_str(self.start), end=ms_to_str(self.end))
if not PY3: s = s.encode("utf-8")
return s
|
tkarabela/pysubs2 | pysubs2/ssaevent.py | SSAEvent.shift | python | def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
self.start += delta
self.end += delta | Shift start and end times.
See :meth:`SSAFile.shift()` for full description. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssaevent.py#L105-L114 | [
"def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):\n \"\"\"\n Convert time to milliseconds.\n\n See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,\n :func:`pysubs2.time.frames_to_ms()` is called instead.\n\n Raises:\n ValueError: Invalid fps, or one of fra... | class SSAEvent(object):
"""
A SubStation Event, ie. one subtitle.
In SubStation, each subtitle consists of multiple "fields" like Start, End and Text.
These are exposed as attributes (note that they are lowercase; see :attr:`SSAEvent.FIELDS` for a list).
Additionally, there are some convenience properties like :attr:`SSAEvent.plaintext` or :attr:`SSAEvent.duration`.
This class defines an ordering with respect to (start, end) timestamps.
.. tip :: Use :func:`pysubs2.make_time()` to get times in milliseconds.
Example::
>>> ev = SSAEvent(start=make_time(s=1), end=make_time(s=2.5), text="Hello World!")
"""
OVERRIDE_SEQUENCE = re.compile(r"{[^}]*}")
#: All fields in SSAEvent.
FIELDS = frozenset([
"start", "end", "text", "marked", "layer", "style",
"name", "marginl", "marginr", "marginv", "effect", "type"
])
def __init__(self, **fields):
self.start = 0 #: Subtitle start time (in milliseconds)
self.end = 10000 #: Subtitle end time (in milliseconds)
self.text = "" #: Text of subtitle (with SubStation override tags)
self.marked = False #: (SSA only)
self.layer = 0 #: Layer number, 0 is the lowest layer (ASS only)
self.style = "Default" #: Style name
self.name = "" #: Actor name
self.marginl = 0 #: Left margin
self.marginr = 0 #: Right margin
self.marginv = 0 #: Vertical margin
self.effect = "" #: Line effect
self.type = "Dialogue" #: Line type (Dialogue/Comment)
for k, v in fields.items():
if k in self.FIELDS:
setattr(self, k, v)
else:
raise ValueError("SSAEvent has no field named %r" % k)
@property
def duration(self):
"""
Subtitle duration in milliseconds (read/write property).
Writing to this property adjusts :attr:`SSAEvent.end`.
Setting negative durations raises :exc:`ValueError`.
"""
return self.end - self.start
@duration.setter
def duration(self, ms):
if ms >= 0:
self.end = self.start + ms
else:
raise ValueError("Subtitle duration cannot be negative")
@property
def is_comment(self):
"""
When true, the subtitle is a comment, ie. not visible (read/write property).
Setting this property is equivalent to changing
:attr:`SSAEvent.type` to ``"Dialogue"`` or ``"Comment"``.
"""
return self.type == "Comment"
@is_comment.setter
def is_comment(self, value):
if value:
self.type = "Comment"
else:
self.type = "Dialogue"
@property
def plaintext(self):
"""
Subtitle text as multi-line string with no tags (read/write property).
Writing to this property replaces :attr:`SSAEvent.text` with given plain
text. Newlines are converted to ``\\N`` tags.
"""
text = self.text
text = self.OVERRIDE_SEQUENCE.sub("", text)
text = text.replace(r"\h", " ")
text = text.replace(r"\n", "\n")
text = text.replace(r"\N", "\n")
return text
@plaintext.setter
def plaintext(self, text):
self.text = text.replace("\n", r"\N")
def copy(self):
"""Return a copy of the SSAEvent."""
return SSAEvent(**self.as_dict())
def as_dict(self):
return {field: getattr(self, field) for field in self.FIELDS}
def equals(self, other):
"""Field-based equality for SSAEvents."""
if isinstance(other, SSAEvent):
return self.as_dict() == other.as_dict()
else:
raise TypeError("Cannot compare to non-SSAEvent object")
def __eq__(self, other):
# XXX document this
return self.start == other.start and self.end == other.end
def __ne__(self, other):
return self.start != other.start or self.end != other.end
def __lt__(self, other):
return (self.start, self.end) < (other.start, other.end)
def __le__(self, other):
return (self.start, self.end) <= (other.start, other.end)
def __gt__(self, other):
return (self.start, self.end) > (other.start, other.end)
def __ge__(self, other):
return (self.start, self.end) >= (other.start, other.end)
def __repr__(self):
s = "<SSAEvent type={self.type} start={start} end={end} text='{self.text}'>".format(
self=self, start=ms_to_str(self.start), end=ms_to_str(self.end))
if not PY3: s = s.encode("utf-8")
return s
|
tkarabela/pysubs2 | pysubs2/ssaevent.py | SSAEvent.equals | python | def equals(self, other):
if isinstance(other, SSAEvent):
return self.as_dict() == other.as_dict()
else:
raise TypeError("Cannot compare to non-SSAEvent object") | Field-based equality for SSAEvents. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssaevent.py#L123-L128 | [
"def as_dict(self):\n return {field: getattr(self, field) for field in self.FIELDS}\n"
] | class SSAEvent(object):
"""
A SubStation Event, ie. one subtitle.
In SubStation, each subtitle consists of multiple "fields" like Start, End and Text.
These are exposed as attributes (note that they are lowercase; see :attr:`SSAEvent.FIELDS` for a list).
Additionally, there are some convenience properties like :attr:`SSAEvent.plaintext` or :attr:`SSAEvent.duration`.
This class defines an ordering with respect to (start, end) timestamps.
.. tip :: Use :func:`pysubs2.make_time()` to get times in milliseconds.
Example::
>>> ev = SSAEvent(start=make_time(s=1), end=make_time(s=2.5), text="Hello World!")
"""
OVERRIDE_SEQUENCE = re.compile(r"{[^}]*}")
#: All fields in SSAEvent.
FIELDS = frozenset([
"start", "end", "text", "marked", "layer", "style",
"name", "marginl", "marginr", "marginv", "effect", "type"
])
def __init__(self, **fields):
self.start = 0 #: Subtitle start time (in milliseconds)
self.end = 10000 #: Subtitle end time (in milliseconds)
self.text = "" #: Text of subtitle (with SubStation override tags)
self.marked = False #: (SSA only)
self.layer = 0 #: Layer number, 0 is the lowest layer (ASS only)
self.style = "Default" #: Style name
self.name = "" #: Actor name
self.marginl = 0 #: Left margin
self.marginr = 0 #: Right margin
self.marginv = 0 #: Vertical margin
self.effect = "" #: Line effect
self.type = "Dialogue" #: Line type (Dialogue/Comment)
for k, v in fields.items():
if k in self.FIELDS:
setattr(self, k, v)
else:
raise ValueError("SSAEvent has no field named %r" % k)
@property
def duration(self):
"""
Subtitle duration in milliseconds (read/write property).
Writing to this property adjusts :attr:`SSAEvent.end`.
Setting negative durations raises :exc:`ValueError`.
"""
return self.end - self.start
@duration.setter
def duration(self, ms):
if ms >= 0:
self.end = self.start + ms
else:
raise ValueError("Subtitle duration cannot be negative")
@property
def is_comment(self):
"""
When true, the subtitle is a comment, ie. not visible (read/write property).
Setting this property is equivalent to changing
:attr:`SSAEvent.type` to ``"Dialogue"`` or ``"Comment"``.
"""
return self.type == "Comment"
@is_comment.setter
def is_comment(self, value):
if value:
self.type = "Comment"
else:
self.type = "Dialogue"
@property
def plaintext(self):
"""
Subtitle text as multi-line string with no tags (read/write property).
Writing to this property replaces :attr:`SSAEvent.text` with given plain
text. Newlines are converted to ``\\N`` tags.
"""
text = self.text
text = self.OVERRIDE_SEQUENCE.sub("", text)
text = text.replace(r"\h", " ")
text = text.replace(r"\n", "\n")
text = text.replace(r"\N", "\n")
return text
@plaintext.setter
def plaintext(self, text):
self.text = text.replace("\n", r"\N")
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift start and end times.
See :meth:`SSAFile.shift()` for full description.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
self.start += delta
self.end += delta
def copy(self):
"""Return a copy of the SSAEvent."""
return SSAEvent(**self.as_dict())
def as_dict(self):
return {field: getattr(self, field) for field in self.FIELDS}
def __eq__(self, other):
# XXX document this
return self.start == other.start and self.end == other.end
def __ne__(self, other):
return self.start != other.start or self.end != other.end
def __lt__(self, other):
return (self.start, self.end) < (other.start, other.end)
def __le__(self, other):
return (self.start, self.end) <= (other.start, other.end)
def __gt__(self, other):
return (self.start, self.end) > (other.start, other.end)
def __ge__(self, other):
return (self.start, self.end) >= (other.start, other.end)
def __repr__(self):
s = "<SSAEvent type={self.type} start={start} end={end} text='{self.text}'>".format(
self=self, start=ms_to_str(self.start), end=ms_to_str(self.end))
if not PY3: s = s.encode("utf-8")
return s
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.load | python | def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs) | Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976) | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L52-L92 | null | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.from_string | python | def from_string(cls, string, format_=None, fps=None, **kwargs):
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs) | Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text) | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L95-L118 | null | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.from_file | python | def from_file(cls, fp, format_=None, fps=None, **kwargs):
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs | Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L121-L153 | [
"def get_format_class(format_):\n \"\"\"Format identifier -> format class (ie. subclass of FormatBase)\"\"\"\n try:\n return FORMAT_IDENTIFIER_TO_FORMAT_CLASS[format_]\n except KeyError:\n raise UnknownFormatIdentifierError(format_)\n",
"def autodetect_format(content):\n \"\"\"Return for... | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.save | python | def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs) | Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L155-L190 | [
"def get_format_identifier(ext):\n \"\"\"File extension -> format identifier\"\"\"\n try:\n return FILE_EXTENSION_TO_FORMAT_IDENTIFIER[ext]\n except KeyError:\n raise UnknownFileExtensionError(ext)\n"
] | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.to_string | python | def to_string(self, format_, fps=None, **kwargs):
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue() | Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L192-L204 | null | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.to_file | python | def to_file(self, fp, format_, fps=None, **kwargs):
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs) | Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary). | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L206-L222 | [
"def get_format_class(format_):\n \"\"\"Format identifier -> format class (ie. subclass of FormatBase)\"\"\"\n try:\n return FORMAT_IDENTIFIER_TO_FORMAT_CLASS[format_]\n except KeyError:\n raise UnknownFormatIdentifierError(format_)\n"
] | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.transform_framerate | python | def transform_framerate(self, in_fps, out_fps):
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio)) | Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L250-L271 | null | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.rename_style | python | def rename_style(self, old_name, new_name):
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name | Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L277-L304 | [
"def is_valid_field_content(s):\n \"\"\"\n Returns True if string s can be stored in a SubStation field.\n\n Fields are written in CSV-like manner, thus commas and/or newlines\n are not acceptable in the string.\n\n \"\"\"\n return \"\\n\" not in s and \",\" not in s\n"
] | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.import_styles | python | def import_styles(self, subs, overwrite=True):
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style | Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True). | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L306-L321 | null | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object")
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.equals | python | def equals(self, other):
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object") | Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L327-L379 | null | class SSAFile(MutableSequence):
"""
Subtitle file in SubStation Alpha format.
This class has a list-like interface which exposes :attr:`SSAFile.events`,
list of subtitles in the file::
subs = SSAFile.load("subtitles.srt")
for line in subs:
print(line.text)
subs.insert(0, SSAEvent(start=0, end=make_time(s=2.5), text="New first subtitle"))
del subs[0]
"""
DEFAULT_INFO = OrderedDict([
("WrapStyle", "0"),
("ScaledBorderAndShadow", "yes"),
("Collisions", "Normal")])
def __init__(self):
self.events = [] #: List of :class:`SSAEvent` instances, ie. individual subtitles.
self.styles = OrderedDict([("Default", SSAStyle.DEFAULT_STYLE.copy())]) #: Dict of :class:`SSAStyle` instances.
self.info = self.DEFAULT_INFO.copy() #: Dict with script metadata, ie. ``[Script Info]``.
self.aegisub_project = OrderedDict() #: Dict with Aegisub project, ie. ``[Aegisub Project Garbage]``.
self.fps = None #: Framerate used when reading the file, if applicable.
self.format = None #: Format of source subtitle file, if applicable, eg. ``"srt"``.
# ------------------------------------------------------------------------
# I/O methods
# ------------------------------------------------------------------------
@classmethod
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
@classmethod
def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs)
def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue()
def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs)
# ------------------------------------------------------------------------
# Retiming subtitles
# ------------------------------------------------------------------------
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift all subtitles by constant time amount.
Shift may be time-based (the default) or frame-based. In the latter
case, specify both frames and fps. h, m, s, ms will be ignored.
Arguments:
h, m, s, ms: Integer or float values, may be positive or negative.
frames (int): When specified, must be an integer number of frames.
May be positive or negative. fps must be also specified.
fps (float): When specified, must be a positive number.
Raises:
ValueError: Invalid fps or missing number of frames.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
for line in self:
line.start += delta
line.end += delta
def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio))
# ------------------------------------------------------------------------
# Working with styles
# ------------------------------------------------------------------------
def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name
def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style
# ------------------------------------------------------------------------
# Helper methods
# ------------------------------------------------------------------------
def __repr__(self):
if self.events:
max_time = max(ev.end for ev in self)
s = "<SSAFile with %d events and %d styles, last timestamp %s>" % \
(len(self), len(self.styles), ms_to_str(max_time))
else:
s = "<SSAFile with 0 events and %d styles>" % len(self.styles)
if not PY3: s = s.encode("utf-8")
return s
# ------------------------------------------------------------------------
# MutableSequence implementation + sort()
# ------------------------------------------------------------------------
def sort(self):
"""Sort subtitles time-wise, in-place."""
self.events.sort()
def __getitem__(self, item):
return self.events[item]
def __setitem__(self, key, value):
if isinstance(value, SSAEvent):
self.events[key] = value
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
def __delitem__(self, key):
del self.events[key]
def __len__(self):
return len(self.events)
def insert(self, index, value):
if isinstance(value, SSAEvent):
self.events.insert(index, value)
else:
raise TypeError("SSAFile.events must contain only SSAEvent objects")
|
tkarabela/pysubs2 | pysubs2/formats.py | get_file_extension | python | def get_file_extension(format_):
if format_ not in FORMAT_IDENTIFIER_TO_FORMAT_CLASS:
raise UnknownFormatIdentifierError(format_)
for ext, f in FILE_EXTENSION_TO_FORMAT_IDENTIFIER.items():
if f == format_:
return ext
raise RuntimeError("No file extension for format %r" % format_) | Format identifier -> file extension | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/formats.py#L42-L51 | null | from .formatbase import FormatBase
from .microdvd import MicroDVDFormat
from .subrip import SubripFormat
from .jsonformat import JSONFormat
from .substation import SubstationFormat
from .mpl2 import MPL2Format
from .exceptions import *
#: Dict mapping file extensions to format identifiers.
FILE_EXTENSION_TO_FORMAT_IDENTIFIER = {
".srt": "srt",
".ass": "ass",
".ssa": "ssa",
".sub": "microdvd",
".json": "json",
}
#: Dict mapping format identifiers to implementations (FormatBase subclasses).
FORMAT_IDENTIFIER_TO_FORMAT_CLASS = {
"srt": SubripFormat,
"ass": SubstationFormat,
"ssa": SubstationFormat,
"microdvd": MicroDVDFormat,
"json": JSONFormat,
"mpl2": MPL2Format,
}
def get_format_class(format_):
"""Format identifier -> format class (ie. subclass of FormatBase)"""
try:
return FORMAT_IDENTIFIER_TO_FORMAT_CLASS[format_]
except KeyError:
raise UnknownFormatIdentifierError(format_)
def get_format_identifier(ext):
"""File extension -> format identifier"""
try:
return FILE_EXTENSION_TO_FORMAT_IDENTIFIER[ext]
except KeyError:
raise UnknownFileExtensionError(ext)
def autodetect_format(content):
"""Return format identifier for given fragment or raise FormatAutodetectionError."""
formats = set()
for impl in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values():
guess = impl.guess_format(content)
if guess is not None:
formats.add(guess)
if len(formats) == 1:
return formats.pop()
elif not formats:
raise FormatAutodetectionError("No suitable formats")
else:
raise FormatAutodetectionError("Multiple suitable formats (%r)" % formats)
|
tkarabela/pysubs2 | pysubs2/formats.py | autodetect_format | python | def autodetect_format(content):
formats = set()
for impl in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values():
guess = impl.guess_format(content)
if guess is not None:
formats.add(guess)
if len(formats) == 1:
return formats.pop()
elif not formats:
raise FormatAutodetectionError("No suitable formats")
else:
raise FormatAutodetectionError("Multiple suitable formats (%r)" % formats) | Return format identifier for given fragment or raise FormatAutodetectionError. | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/formats.py#L53-L66 | null | from .formatbase import FormatBase
from .microdvd import MicroDVDFormat
from .subrip import SubripFormat
from .jsonformat import JSONFormat
from .substation import SubstationFormat
from .mpl2 import MPL2Format
from .exceptions import *
#: Dict mapping file extensions to format identifiers.
FILE_EXTENSION_TO_FORMAT_IDENTIFIER = {
".srt": "srt",
".ass": "ass",
".ssa": "ssa",
".sub": "microdvd",
".json": "json",
}
#: Dict mapping format identifiers to implementations (FormatBase subclasses).
FORMAT_IDENTIFIER_TO_FORMAT_CLASS = {
"srt": SubripFormat,
"ass": SubstationFormat,
"ssa": SubstationFormat,
"microdvd": MicroDVDFormat,
"json": JSONFormat,
"mpl2": MPL2Format,
}
def get_format_class(format_):
"""Format identifier -> format class (ie. subclass of FormatBase)"""
try:
return FORMAT_IDENTIFIER_TO_FORMAT_CLASS[format_]
except KeyError:
raise UnknownFormatIdentifierError(format_)
def get_format_identifier(ext):
"""File extension -> format identifier"""
try:
return FILE_EXTENSION_TO_FORMAT_IDENTIFIER[ext]
except KeyError:
raise UnknownFileExtensionError(ext)
def get_file_extension(format_):
"""Format identifier -> file extension"""
if format_ not in FORMAT_IDENTIFIER_TO_FORMAT_CLASS:
raise UnknownFormatIdentifierError(format_)
for ext, f in FILE_EXTENSION_TO_FORMAT_IDENTIFIER.items():
if f == format_:
return ext
raise RuntimeError("No file extension for format %r" % format_)
|
mozilla/crontabber | crontabber/base.py | toposort | python | def toposort(data):
# Special case empty input.
if len(data) == 0:
return
# Copy the input so as to leave it unmodified.
data = data.copy()
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items_in_deps = functools.reduce(
set.union, data.values()
) - set(data.keys())
# Add empty dependences where needed.
data.update(dict((item, set()) for item in extra_items_in_deps))
while True:
ordered = set(item for item, dep in data.items() if len(dep) == 0)
if not ordered:
break
yield ordered
data = dict(
(item, (dep - ordered))
for item, dep in data.items()
if item not in ordered
)
if len(data) != 0:
raise ValueError(
'Cyclic dependencies exist among these items: {}'
.format(', '.join(repr(x) for x in data.items()))
) | Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependences, each subsequent set consists of items that depend upon
items in the preceeding sets. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/base.py#L30-L69 | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import collections
import datetime
import re
import functools
from datetimeutil import utc_now
from configman import Namespace, RequiredConfig
#==============================================================================
class FrequencyDefinitionError(Exception):
pass
#==============================================================================
class CircularDAGError(Exception):
pass
#==============================================================================
# The following two functions have been plucked from
# https://bitbucket.org/ericvsmith/toposort but modified so it works
# in Python 2.6.
def toposort_flatten(data, sort=True):
"""Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
make the results deterministic)."""
result = []
for d in toposort(data):
result.extend((sorted if sort else list)(d))
return result
#==============================================================================
def reorder_dag(sequence,
depends_getter=lambda x: x.depends_on,
name_getter=lambda x: x.app_name,
impatience_max=100):
"""
DAG = Directed Acyclic Graph
If we have something like:
C depends on B
B depends on A
A doesn't depend on any
Given the order of [C, B, A] expect it to return [A, B, C]
parameters:
:sequence: some sort of iterable list
:depends_getter: a callable that extracts the depends on sub-list
:name_getter: a callable that extracts the name
:impatience_max: a max count that is reached before we end up in
an infinite loop.
"""
jobs = collections.defaultdict(list)
map_ = {}
_count_roots = 0
for each in sequence:
name = name_getter(each)
depends_on = depends_getter(each)
if depends_on is None:
depends_on = []
elif isinstance(depends_on, tuple):
depends_on = list(depends_on)
elif not isinstance(depends_on, list):
depends_on = [depends_on]
if not depends_on:
_count_roots += 1
jobs[name] += depends_on
map_[name] = each
if not _count_roots:
raise CircularDAGError("No job is at the root")
try:
jobs = dict(zip(jobs.keys(), map(set, jobs.values())))
ordered_jobs = list(toposort_flatten(jobs))
except ValueError, e:
raise CircularDAGError(e)
return [map_[x] for x in ordered_jobs if x in map_]
#==============================================================================
def convert_frequency(frequency):
"""return the number of seconds that a certain frequency string represents.
For example: `1d` means 1 day which means 60 * 60 * 24 seconds.
The recognized formats are:
10d : 10 days
3m : 3 minutes
12h : 12 hours
"""
number = int(re.findall('\d+', frequency)[0])
unit = re.findall('[^\d]+', frequency)[0]
if unit == 'h':
number *= 60 * 60
elif unit == 'm':
number *= 60
elif unit == 'd':
number *= 60 * 60 * 24
elif unit:
raise FrequencyDefinitionError(unit)
return number
#==============================================================================
class BaseCronApp(RequiredConfig):
"""The base class from which Socorro cron apps are based. Subclasses
should use the cron app class decorators below to add features such as
PostgreSQL connections or backfill capability."""
required_config = Namespace()
#--------------------------------------------------------------------------
def __init__(self, config, job_information):
self.config = config
self.job_information = job_information
# commented out because it doesn't work and I don't know why!
# def __repr__(self): # pragma: no cover
# return ('<%s (app_name: %r, app_version:%r)>' % (
# self.__class__,
# self.app_name,
# self.app_version))
#--------------------------------------------------------------------------
def main(self, function=None, once=True):
if not function:
function = self._run_proxy
now = utc_now()
# handle one of four possible cases
# case 1: no backfill, just run this job now
if once:
function()
yield now
return
# case 2: this could be a backfil, but we don't have any
# job information. Run it with today's date
if not self.job_information:
function(now)
yield now
return
# back fill cases:
# figure out when it was last run successfully
last_success = self.job_information.get(
'last_success',
self.job_information.get('first_run')
)
# case 3: either it has never run successfully or it was previously run
# before the 'first_run' key was added (legacy).
if not last_success:
self.config.logger.warning(
'No previous last_success information available'
)
# just run it for the time 'now'
function(now)
yield now
return
# case 4:
when = last_success
# The last_success datetime is originally based on the
# first_run. From then onwards it just adds the interval to
# it so the hour is not likely to drift from that original
# time.
# However, what can happen is that on a given day, "now" is
# LESS than the day before. This can happen because the jobs
# that are run BEFORE are variable in terms of how long it
# takes. Thus, one day, now might be "18:02" and the next day
# the it's "18:01". If this happens the original difference
# will prevent it from running the backfill again.
#
# For more info see the
# test_backfilling_with_configured_time_slow_job unit test.
if self.config.time:
# So, reset the hour/minute part to always match the
# intention.
h, m = [int(x) for x in self.config.time.split(':')]
when = when.replace(
hour=h,
minute=m,
second=0,
microsecond=0
)
seconds = convert_frequency(self.config.frequency)
interval = datetime.timedelta(seconds=seconds)
# loop over each missed interval from the time of the last success,
# forward by each interval until it reaches the time 'now'. Run the
# cron app once for each interval.
while (when + interval) < now:
when += interval
function(when)
yield when
#--------------------------------------------------------------------------
def _run_proxy(self, *args, **kwargs):
"""this is indirection to the run function. By exectuting this method
instead of the actual "run" method directly, we can use inheritance
to provide some resources to the run function via the run function's
arguments"""
return self.run(*args, **kwargs)
#--------------------------------------------------------------------------
def run(self): # pragma: no cover
# crontabber apps should define their own run functions and not rely
# on these base classes. This default base method threatens a runtime
# error
raise NotImplementedError("Your fault!")
|
mozilla/crontabber | crontabber/base.py | reorder_dag | python | def reorder_dag(sequence,
depends_getter=lambda x: x.depends_on,
name_getter=lambda x: x.app_name,
impatience_max=100):
jobs = collections.defaultdict(list)
map_ = {}
_count_roots = 0
for each in sequence:
name = name_getter(each)
depends_on = depends_getter(each)
if depends_on is None:
depends_on = []
elif isinstance(depends_on, tuple):
depends_on = list(depends_on)
elif not isinstance(depends_on, list):
depends_on = [depends_on]
if not depends_on:
_count_roots += 1
jobs[name] += depends_on
map_[name] = each
if not _count_roots:
raise CircularDAGError("No job is at the root")
try:
jobs = dict(zip(jobs.keys(), map(set, jobs.values())))
ordered_jobs = list(toposort_flatten(jobs))
except ValueError, e:
raise CircularDAGError(e)
return [map_[x] for x in ordered_jobs if x in map_] | DAG = Directed Acyclic Graph
If we have something like:
C depends on B
B depends on A
A doesn't depend on any
Given the order of [C, B, A] expect it to return [A, B, C]
parameters:
:sequence: some sort of iterable list
:depends_getter: a callable that extracts the depends on sub-list
:name_getter: a callable that extracts the name
:impatience_max: a max count that is reached before we end up in
an infinite loop. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/base.py#L84-L135 | [
"depends_getter=lambda x: x.depends_on,\n",
"name_getter=lambda x: x.app_name,\n",
"def toposort_flatten(data, sort=True):\n \"\"\"Returns a single list of dependencies. For any set returned by\ntoposort(), those items are sorted and appended to the result (just to\nmake the results deterministic).\"\"\"\n\n... | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import collections
import datetime
import re
import functools
from datetimeutil import utc_now
from configman import Namespace, RequiredConfig
#==============================================================================
class FrequencyDefinitionError(Exception):
pass
#==============================================================================
class CircularDAGError(Exception):
pass
#==============================================================================
# The following two functions have been plucked from
# https://bitbucket.org/ericvsmith/toposort but modified so it works
# in Python 2.6.
def toposort(data):
"""Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependences, each subsequent set consists of items that depend upon
items in the preceeding sets.
"""
# Special case empty input.
if len(data) == 0:
return
# Copy the input so as to leave it unmodified.
data = data.copy()
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items_in_deps = functools.reduce(
set.union, data.values()
) - set(data.keys())
# Add empty dependences where needed.
data.update(dict((item, set()) for item in extra_items_in_deps))
while True:
ordered = set(item for item, dep in data.items() if len(dep) == 0)
if not ordered:
break
yield ordered
data = dict(
(item, (dep - ordered))
for item, dep in data.items()
if item not in ordered
)
if len(data) != 0:
raise ValueError(
'Cyclic dependencies exist among these items: {}'
.format(', '.join(repr(x) for x in data.items()))
)
def toposort_flatten(data, sort=True):
"""Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
make the results deterministic)."""
result = []
for d in toposort(data):
result.extend((sorted if sort else list)(d))
return result
#==============================================================================
#==============================================================================
def convert_frequency(frequency):
"""return the number of seconds that a certain frequency string represents.
For example: `1d` means 1 day which means 60 * 60 * 24 seconds.
The recognized formats are:
10d : 10 days
3m : 3 minutes
12h : 12 hours
"""
number = int(re.findall('\d+', frequency)[0])
unit = re.findall('[^\d]+', frequency)[0]
if unit == 'h':
number *= 60 * 60
elif unit == 'm':
number *= 60
elif unit == 'd':
number *= 60 * 60 * 24
elif unit:
raise FrequencyDefinitionError(unit)
return number
#==============================================================================
class BaseCronApp(RequiredConfig):
"""The base class from which Socorro cron apps are based. Subclasses
should use the cron app class decorators below to add features such as
PostgreSQL connections or backfill capability."""
required_config = Namespace()
#--------------------------------------------------------------------------
def __init__(self, config, job_information):
self.config = config
self.job_information = job_information
# commented out because it doesn't work and I don't know why!
# def __repr__(self): # pragma: no cover
# return ('<%s (app_name: %r, app_version:%r)>' % (
# self.__class__,
# self.app_name,
# self.app_version))
#--------------------------------------------------------------------------
def main(self, function=None, once=True):
if not function:
function = self._run_proxy
now = utc_now()
# handle one of four possible cases
# case 1: no backfill, just run this job now
if once:
function()
yield now
return
# case 2: this could be a backfil, but we don't have any
# job information. Run it with today's date
if not self.job_information:
function(now)
yield now
return
# back fill cases:
# figure out when it was last run successfully
last_success = self.job_information.get(
'last_success',
self.job_information.get('first_run')
)
# case 3: either it has never run successfully or it was previously run
# before the 'first_run' key was added (legacy).
if not last_success:
self.config.logger.warning(
'No previous last_success information available'
)
# just run it for the time 'now'
function(now)
yield now
return
# case 4:
when = last_success
# The last_success datetime is originally based on the
# first_run. From then onwards it just adds the interval to
# it so the hour is not likely to drift from that original
# time.
# However, what can happen is that on a given day, "now" is
# LESS than the day before. This can happen because the jobs
# that are run BEFORE are variable in terms of how long it
# takes. Thus, one day, now might be "18:02" and the next day
# the it's "18:01". If this happens the original difference
# will prevent it from running the backfill again.
#
# For more info see the
# test_backfilling_with_configured_time_slow_job unit test.
if self.config.time:
# So, reset the hour/minute part to always match the
# intention.
h, m = [int(x) for x in self.config.time.split(':')]
when = when.replace(
hour=h,
minute=m,
second=0,
microsecond=0
)
seconds = convert_frequency(self.config.frequency)
interval = datetime.timedelta(seconds=seconds)
# loop over each missed interval from the time of the last success,
# forward by each interval until it reaches the time 'now'. Run the
# cron app once for each interval.
while (when + interval) < now:
when += interval
function(when)
yield when
#--------------------------------------------------------------------------
def _run_proxy(self, *args, **kwargs):
"""this is indirection to the run function. By exectuting this method
instead of the actual "run" method directly, we can use inheritance
to provide some resources to the run function via the run function's
arguments"""
return self.run(*args, **kwargs)
#--------------------------------------------------------------------------
def run(self): # pragma: no cover
# crontabber apps should define their own run functions and not rely
# on these base classes. This default base method threatens a runtime
# error
raise NotImplementedError("Your fault!")
|
mozilla/crontabber | crontabber/base.py | convert_frequency | python | def convert_frequency(frequency):
number = int(re.findall('\d+', frequency)[0])
unit = re.findall('[^\d]+', frequency)[0]
if unit == 'h':
number *= 60 * 60
elif unit == 'm':
number *= 60
elif unit == 'd':
number *= 60 * 60 * 24
elif unit:
raise FrequencyDefinitionError(unit)
return number | return the number of seconds that a certain frequency string represents.
For example: `1d` means 1 day which means 60 * 60 * 24 seconds.
The recognized formats are:
10d : 10 days
3m : 3 minutes
12h : 12 hours | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/base.py#L139-L157 | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import collections
import datetime
import re
import functools
from datetimeutil import utc_now
from configman import Namespace, RequiredConfig
#==============================================================================
class FrequencyDefinitionError(Exception):
pass
#==============================================================================
class CircularDAGError(Exception):
pass
#==============================================================================
# The following two functions have been plucked from
# https://bitbucket.org/ericvsmith/toposort but modified so it works
# in Python 2.6.
def toposort(data):
"""Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependences, each subsequent set consists of items that depend upon
items in the preceeding sets.
"""
# Special case empty input.
if len(data) == 0:
return
# Copy the input so as to leave it unmodified.
data = data.copy()
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items_in_deps = functools.reduce(
set.union, data.values()
) - set(data.keys())
# Add empty dependences where needed.
data.update(dict((item, set()) for item in extra_items_in_deps))
while True:
ordered = set(item for item, dep in data.items() if len(dep) == 0)
if not ordered:
break
yield ordered
data = dict(
(item, (dep - ordered))
for item, dep in data.items()
if item not in ordered
)
if len(data) != 0:
raise ValueError(
'Cyclic dependencies exist among these items: {}'
.format(', '.join(repr(x) for x in data.items()))
)
def toposort_flatten(data, sort=True):
"""Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
make the results deterministic)."""
result = []
for d in toposort(data):
result.extend((sorted if sort else list)(d))
return result
#==============================================================================
def reorder_dag(sequence,
depends_getter=lambda x: x.depends_on,
name_getter=lambda x: x.app_name,
impatience_max=100):
"""
DAG = Directed Acyclic Graph
If we have something like:
C depends on B
B depends on A
A doesn't depend on any
Given the order of [C, B, A] expect it to return [A, B, C]
parameters:
:sequence: some sort of iterable list
:depends_getter: a callable that extracts the depends on sub-list
:name_getter: a callable that extracts the name
:impatience_max: a max count that is reached before we end up in
an infinite loop.
"""
jobs = collections.defaultdict(list)
map_ = {}
_count_roots = 0
for each in sequence:
name = name_getter(each)
depends_on = depends_getter(each)
if depends_on is None:
depends_on = []
elif isinstance(depends_on, tuple):
depends_on = list(depends_on)
elif not isinstance(depends_on, list):
depends_on = [depends_on]
if not depends_on:
_count_roots += 1
jobs[name] += depends_on
map_[name] = each
if not _count_roots:
raise CircularDAGError("No job is at the root")
try:
jobs = dict(zip(jobs.keys(), map(set, jobs.values())))
ordered_jobs = list(toposort_flatten(jobs))
except ValueError, e:
raise CircularDAGError(e)
return [map_[x] for x in ordered_jobs if x in map_]
#==============================================================================
#==============================================================================
class BaseCronApp(RequiredConfig):
    """The base class from which Socorro cron apps are based. Subclasses
    should use the cron app class decorators below to add features such as
    PostgreSQL connections or backfill capability."""
    required_config = Namespace()

    #--------------------------------------------------------------------------
    def __init__(self, config, job_information):
        """Store the app configuration and crontabber's persisted state.

        parameters:
            config - the configman configuration for this app
            job_information - crontabber's bookkeeping for this job (keys
                              such as 'last_success' and 'first_run'); may
                              be empty for a job that has never run
        """
        self.config = config
        self.job_information = job_information

    # commented out because it doesn't work and I don't know why!
    #    def __repr__(self):  # pragma: no cover
    #        return ('<%s (app_name: %r, app_version:%r)>' % (
    #            self.__class__,
    #            self.app_name,
    #            self.app_version))

    #--------------------------------------------------------------------------
    def main(self, function=None, once=True):
        """Run the app, yielding each datetime the work was executed for.

        This is a generator: the caller iterates it and records each
        yielded datetime as a successful run.  *function* defaults to
        _run_proxy (which calls self.run); *once* False enables backfill.
        """
        if not function:
            function = self._run_proxy
        now = utc_now()
        # handle one of four possible cases
        # case 1: no backfill, just run this job now
        if once:
            function()
            yield now
            return
        # case 2: this could be a backfill, but we don't have any
        # job information. Run it with today's date
        if not self.job_information:
            function(now)
            yield now
            return
        # back fill cases:
        # figure out when it was last run successfully
        last_success = self.job_information.get(
            'last_success',
            self.job_information.get('first_run')
        )
        # case 3: either it has never run successfully or it was previously run
        # before the 'first_run' key was added (legacy).
        if not last_success:
            self.config.logger.warning(
                'No previous last_success information available'
            )
            # just run it for the time 'now'
            function(now)
            yield now
            return
        # case 4:
        when = last_success
        # The last_success datetime is originally based on the
        # first_run. From then onwards it just adds the interval to
        # it so the hour is not likely to drift from that original
        # time.
        # However, what can happen is that on a given day, "now" is
        # LESS than the day before. This can happen because the jobs
        # that are run BEFORE are variable in terms of how long it
        # takes. Thus, one day, now might be "18:02" and the next day
        # the it's "18:01". If this happens the original difference
        # will prevent it from running the backfill again.
        #
        # For more info see the
        # test_backfilling_with_configured_time_slow_job unit test.
        if self.config.time:
            # So, reset the hour/minute part to always match the
            # intention.
            h, m = [int(x) for x in self.config.time.split(':')]
            when = when.replace(
                hour=h,
                minute=m,
                second=0,
                microsecond=0
            )
        seconds = convert_frequency(self.config.frequency)
        interval = datetime.timedelta(seconds=seconds)
        # loop over each missed interval from the time of the last success,
        # forward by each interval until it reaches the time 'now'. Run the
        # cron app once for each interval.
        while (when + interval) < now:
            when += interval
            function(when)
            yield when

    #--------------------------------------------------------------------------
    def _run_proxy(self, *args, **kwargs):
        """this is indirection to the run function. By exectuting this method
        instead of the actual "run" method directly, we can use inheritance
        to provide some resources to the run function via the run function's
        arguments"""
        return self.run(*args, **kwargs)

    #--------------------------------------------------------------------------
    def run(self):  # pragma: no cover
        # crontabber apps should define their own run functions and not rely
        # on these base classes. This default base method threatens a runtime
        # error
        raise NotImplementedError("Your fault!")
|
def timesince(d, now):
    """Taken from django.utils.timesince and modified to simpler requirements.

    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after
    now, then "0 seconds" is returned (the highest-granularity unit).

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    Adapted from
    http://web.archive.org/web/20060617175230/\
http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    def pluralize(a, b):
        # return a formatter choosing singular (a) or plural (b) template
        def inner(n):
            if n == 1:
                return a % n
            return b % n
        return inner

    def ugettext(s):
        # i18n stub: identity (kept for parity with the django original)
        return s

    # (seconds-per-unit, formatter); ordered largest to smallest.  The final
    # entry's 0 threshold marks the "seconds" fallback handled specially below.
    chunks = (
        (60 * 60 * 24 * 365, pluralize('%d year', '%d years')),
        (60 * 60 * 24 * 30, pluralize('%d month', '%d months')),
        (60 * 60 * 24 * 7, pluralize('%d week', '%d weeks')),
        (60 * 60 * 24, pluralize('%d day', '%d days')),
        (60 * 60, pluralize('%d hour', '%d hours')),
        (60, pluralize('%d minute', '%d minutes')),
        (0, pluralize('%d second', '%d seconds'))
    )
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)
    delta = now - d
    # ignore microseconds
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        # We'll use the last chunk (highest granularity)
        _, name = chunks[-1]
        return name(0)
    for i, (seconds, name) in enumerate(chunks):
        if seconds > 0:
            count = since // seconds
            if count != 0:
                break
        else:
            # fell through to the "seconds" chunk
            count = since
    result = name(count)
    if i + 1 < len(chunks):
        # Now get the second (adjacent, finer-grained) item
        seconds2, name2 = chunks[i + 1]
        if seconds2 > 0:
            count2 = (since - (seconds * count)) // seconds2
        else:
            count2 = since - (seconds * count)
        if count2 != 0:
            result += ugettext(', ') + name2(count2)
    return result
"def pluralize(a, b):\n def inner(n):\n if n == 1:\n return a % n\n return b % n\n return inner\n"
] | import datetime
ZERO = datetime.timedelta(0)


class UTC(datetime.tzinfo):
    """Minimal concrete tzinfo implementing UTC.

    Taken from the Python docs; used only as a fallback when pytz is not
    available.
    """

    def utcoffset(self, dt):
        # UTC is, by definition, at zero offset from UTC
        return ZERO

    def dst(self, dt):
        # UTC never observes daylight saving time
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def __repr__(self):  # pragma: no cover
        return "<UTC>"
def utc_now():
    """Return the current time as a timezone-aware datetime in UTC.

    Convenience wrapper, mainly so callers need not construct the tzinfo
    themselves.  Compare:

    >>> from datetimeutil import utc_now
    >>> utc_now()
    datetime.datetime(2012, 1, 5, 16, 42, 13, 639834,
      tzinfo=<isodate.tzinfo.Utc object at 0x101475210>)

    versus spelling out ``datetime.datetime.now(UTC())`` at every call site.
    """
    return datetime.datetime.now(tz=UTC())
|
def connection(self, name=None):
    """return a named connection.

    This function will return a named connection by either finding one
    in its pool by the name or creating a new one. If no name is given,
    it will use the name of the current executing thread as the name of
    the connection, giving each thread its own pooled connection.

    parameters:
        name - a name as a string
    """
    if not name:
        name = self._get_default_connection_name()
    if name not in self.pool:
        # lazily open a new connection and cache it for reuse
        self.pool[name] = psycopg2.connect(self.dsn)
    return self.pool[name]
"""a configman compliant class that pools Postgres database connections"""
# configman parameter definition section
# here we're setting up the minimal parameters required for connecting
# to a database.
required_config = Namespace()
required_config.add_option(
name='host',
default='localhost',
doc='the hostname of the database',
reference_value_from='resource.postgresql',
)
required_config.add_option(
name='dbname',
default='',
doc='the name of the database',
reference_value_from='resource.postgresql',
)
required_config.add_option(
name='port',
default=5432,
doc='the port for the database',
reference_value_from='resource.postgresql',
)
required_config.add_option(
name='user',
default='',
doc='the name of the user within the database',
reference_value_from='resource.postgresql',
)
required_config.add_option(
name='password',
default='',
doc="the user's database password",
reference_value_from='resource.postgresql',
)
# clients of this class may need to detect Exceptions raised in the
# underlying dbapi2 database module. Rather that forcing them to import
# what should be a hidden module, we expose just the Exception. Clients
# can then just refer to it as ConnectionFactory.IntegrityError
IntegrityError = psycopg2.IntegrityError
#--------------------------------------------------------------------------
def __init__(self, config, local_config=None):
"""Initialize the parts needed to start making database connections
parameters:
config - the complete config for the app. If a real app, this
would be where a logger or other resources could be
found.
local_config - this is the namespace within the complete config
where the actual database parameters are found"""
super(ConnectionFactory, self).__init__()
self.config = config
if local_config is None:
local_config = config
self.dsn = (
"host=%(host)s "
"dbname=%(dbname)s "
"port=%(port)s "
"user=%(user)s "
"password=%(password)s"
% local_config
)
self.operational_exceptions = (
psycopg2.OperationalError,
psycopg2.InterfaceError,
socket.timeout
)
self.conditional_exceptions = (
psycopg2.ProgrammingError,
)
self.pool = {}
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
@contextlib.contextmanager
def __call__(self, name=None):
"""returns a database connection wrapped in a contextmanager.
The context manager will assure that the connection is closed but will
not try to commit or rollback lingering transactions.
parameters:
name - an optional name for the database connection"""
conn = self.connection(name)
try:
yield conn
finally:
self.close_connection(conn)
#--------------------------------------------------------------------------
def close_connection(self, connection, force=False):
"""overriding the baseclass function, this routine will decline to
close a connection at the end of a transaction context. This allows
for reuse of connections."""
if force:
try:
connection.close()
except self.operational_exceptions:
self.config.logger.error('ConnectionFactory - failed closing')
for name, conn in self.pool.iteritems():
if conn is connection:
break
del self.pool[name]
else:
pass
#--------------------------------------------------------------------------
def close(self):
"""close all pooled connections"""
for conn in self.pool.itervalues():
conn.close()
#--------------------------------------------------------------------------
def force_reconnect(self):
name = self._get_default_connection_name()
if name in self.pool:
del self.pool[name]
#--------------------------------------------------------------------------
def is_operational_exception(self, msg):
"""return True if a conditional exception is actually an operational
error. Return False if it's a genuine error that should probably be
raised and propagate up.
Some conditional exceptions might be actually be some form of
operational exception "labelled" wrong by the psycopg2 code error
handler.
"""
if msg.pgerror in ('SSL SYSCALL error: EOF detected',):
# Ideally we'd like to check against msg.pgcode values
# but certain odd ProgrammingError exceptions don't have
# pgcodes so we have to rely on reading the pgerror :(
return True
# at the of writing, the list of exceptions is short but this would be
# where you add more as you discover more odd cases of psycopg2
return False
#--------------------------------------------------------------------------
@staticmethod
def _get_default_connection_name():
return threading.current_thread().getName()
|
def close_connection(self, connection, force=False):
    """overriding the baseclass function, this routine will decline to
    close a connection at the end of a transaction context. This allows
    for reuse of connections.

    With ``force=True`` the connection really is closed (close errors are
    logged, not raised) and its entry is removed from the pool.
    """
    if not force:
        # decline to close so the pooled connection can be reused
        return
    try:
        connection.close()
    except self.operational_exceptions:
        self.config.logger.error('ConnectionFactory - failed closing')
    # drop the pooled entry for this connection, if present.  list() makes a
    # snapshot so deletion during iteration is safe; py2's iteritems() is
    # also avoided, and nothing is deleted when the connection isn't pooled.
    for name, conn in list(self.pool.items()):
        if conn is connection:
            del self.pool[name]
            break
class ConnectionFactory(RequiredConfig):
    """a configman compliant class that pools Postgres database connections"""
    # configman parameter definition section
    # here we're setting up the minimal parameters required for connecting
    # to a database.
    required_config = Namespace()
    required_config.add_option(
        name='host',
        default='localhost',
        doc='the hostname of the database',
        reference_value_from='resource.postgresql',
    )
    required_config.add_option(
        name='dbname',
        default='',
        doc='the name of the database',
        reference_value_from='resource.postgresql',
    )
    required_config.add_option(
        name='port',
        default=5432,
        doc='the port for the database',
        reference_value_from='resource.postgresql',
    )
    required_config.add_option(
        name='user',
        default='',
        doc='the name of the user within the database',
        reference_value_from='resource.postgresql',
    )
    required_config.add_option(
        name='password',
        default='',
        doc="the user's database password",
        reference_value_from='resource.postgresql',
    )
    # clients of this class may need to detect Exceptions raised in the
    # underlying dbapi2 database module. Rather that forcing them to import
    # what should be a hidden module, we expose just the Exception. Clients
    # can then just refer to it as ConnectionFactory.IntegrityError
    IntegrityError = psycopg2.IntegrityError

    #--------------------------------------------------------------------------
    def __init__(self, config, local_config=None):
        """Initialize the parts needed to start making database connections

        parameters:
            config - the complete config for the app. If a real app, this
                     would be where a logger or other resources could be
                     found.
            local_config - this is the namespace within the complete config
                           where the actual database parameters are found"""
        super(ConnectionFactory, self).__init__()
        self.config = config
        if local_config is None:
            local_config = config
        self.dsn = (
            "host=%(host)s "
            "dbname=%(dbname)s "
            "port=%(port)s "
            "user=%(user)s "
            "password=%(password)s"
            % local_config
        )
        # exceptions indicating a dropped/broken connection; safe to retry
        self.operational_exceptions = (
            psycopg2.OperationalError,
            psycopg2.InterfaceError,
            socket.timeout
        )
        # exceptions that may or may not be retriable; see
        # is_operational_exception()
        self.conditional_exceptions = (
            psycopg2.ProgrammingError,
        )
        # name -> open connection; keyed per thread by default
        self.pool = {}

    #--------------------------------------------------------------------------
    def connection(self, name=None):
        """return a named connection.

        This function will return a named connection by either finding one
        in its pool by the name or creating a new one. If no name is given,
        it will use the name of the current executing thread as the name of
        the connection.

        parameters:
            name - a name as a string
        """
        if not name:
            name = self._get_default_connection_name()
        if name in self.pool:
            return self.pool[name]
        self.pool[name] = psycopg2.connect(self.dsn)
        return self.pool[name]

    #--------------------------------------------------------------------------
    @contextlib.contextmanager
    def __call__(self, name=None):
        """returns a database connection wrapped in a contextmanager.

        The context manager will assure that the connection is closed but will
        not try to commit or rollback lingering transactions.

        parameters:
            name - an optional name for the database connection"""
        conn = self.connection(name)
        try:
            yield conn
        finally:
            self.close_connection(conn)

    #--------------------------------------------------------------------------
    #--------------------------------------------------------------------------
    def close(self):
        """close all pooled connections"""
        # NOTE(review): itervalues() is Python-2 only
        for conn in self.pool.itervalues():
            conn.close()

    #--------------------------------------------------------------------------
    def force_reconnect(self):
        """drop the current thread's pooled connection so that the next
        connection() call opens a fresh one"""
        name = self._get_default_connection_name()
        if name in self.pool:
            del self.pool[name]

    #--------------------------------------------------------------------------
    def is_operational_exception(self, msg):
        """return True if a conditional exception is actually an operational
        error. Return False if it's a genuine error that should probably be
        raised and propagate up.

        Some conditional exceptions might be actually be some form of
        operational exception "labelled" wrong by the psycopg2 code error
        handler.
        """
        if msg.pgerror in ('SSL SYSCALL error: EOF detected',):
            # Ideally we'd like to check against msg.pgcode values
            # but certain odd ProgrammingError exceptions don't have
            # pgcodes so we have to rely on reading the pgerror :(
            return True
        # at the of writing, the list of exceptions is short but this would be
        # where you add more as you discover more odd cases of psycopg2
        return False

    #--------------------------------------------------------------------------
    @staticmethod
    def _get_default_connection_name():
        # connections are pooled per thread; the thread name is the pool key
        return threading.current_thread().getName()
|
def respond_to_SIGHUP(signal_number, frame, logger=None):
    """raise the KeyboardInterrupt which will cause the app to effectively
    shutdown, closing all its resources. Then, because it sets 'restart' to
    True, the app will reread all the configuration information, rebuild all
    of its structures and resources and start running again.

    parameters:
        signal_number, frame - standard signal-handler arguments (unused)
        logger - optional logger on which the SIGHUP is recorded
    """
    global restart
    restart = True
    if logger:
        logger.info('detected SIGHUP')
    raise KeyboardInterrupt
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import inspect
import threading
import logging
import logging.handlers
import functools
import signal
from configman import (
ConfigurationManager,
Namespace,
RequiredConfig,
command_line,
ConfigFileFutureProxy,
class_converter
)
from configman.dotdict import DotDictWithAcquisition
# snapshot the process environment as a configman values source; it contains
# far more keys than our config definitions, so mismatches are always ignored
environment = DotDictWithAcquisition(os.environ)
environment.always_ignore_mismatches = True
#==============================================================================
class AppDetailMissingError(AttributeError):
    """Raised when an app class lacks one of the required attributes
    app_name, app_version or app_description (see _do_main)."""
    pass
#==============================================================================
class App(RequiredConfig):
    """The base class from which Socorro apps are based"""
    #--------------------------------------------------------------------------
    def __init__(self, config):
        # the fully resolved configman configuration for this app
        self.config = config

    #--------------------------------------------------------------------------
    def main(self):  # pragma: no cover
        """derived classes must override this function with business logic"""
        raise NotImplementedError(
            "A definition of 'main' in a derived class is required"
        )
#------------------------------------------------------------------------------
def logging_required_config(app_name):
    """Return a configman Namespace defining the standard logging options.

    parameters:
        app_name - substituted into the default syslog line format
    """
    lc = Namespace()
    lc.namespace('logging')
    lc.logging.add_option(
        'syslog_host',
        doc='syslog hostname',
        default='localhost',
        reference_value_from='resource.logging',
    )
    lc.logging.add_option(
        'syslog_port',
        doc='syslog port',
        default=514,
        reference_value_from='resource.logging',
    )
    lc.logging.add_option(
        'syslog_facility_string',
        doc='syslog facility string ("user", "local0", etc)',
        default='user',
        reference_value_from='resource.logging',
    )
    lc.logging.add_option(
        'syslog_line_format_string',
        doc='python logging system format for syslog entries',
        default='%s (pid {process}): '
                '{asctime} {levelname} - {threadName} - '
                '{message}' % app_name,
        reference_value_from='resource.logging',
    )
    lc.logging.add_option(
        'syslog_error_logging_level',
        doc='logging level for the log file (10 - DEBUG, 20 '
            '- INFO, 30 - WARNING, 40 - ERROR, 50 - CRITICAL)',
        default=40,
        reference_value_from='resource.logging',
    )
    lc.logging.add_option(
        'stderr_line_format_string',
        doc='python logging system format for logging to stderr',
        default='{asctime} {levelname} - {threadName} - '
                '{message}',
        reference_value_from='resource.logging',
    )
    lc.logging.add_option(
        'stderr_error_logging_level',
        doc='logging level for the logging to stderr (10 - '
            'DEBUG, 20 - INFO, 30 - WARNING, 40 - ERROR, '
            '50 - CRITICAL)',
        default=10,
        reference_value_from='resource.logging',
    )
    return lc
#==============================================================================
class LoggerWrapper(object):
    """Thin wrapper around a standard logger that prefixes every message
    with the identity (name) of the executing thread."""

    #--------------------------------------------------------------------------
    def __init__(self, logger, config):
        self.config = config
        self.logger = logger

    #--------------------------------------------------------------------------
    def executor_identity(self):
        """return ' - <thread name> - ' used as the log-line prefix"""
        # threading.current_thread().name replaces the camelCase
        # currentThread().getName() API, which was removed in Python 3.12;
        # the snake_case form is available since Python 2.6, same behavior.
        return " - %s - " % threading.current_thread().name

    #--------------------------------------------------------------------------
    def debug(self, message, *args, **kwargs):
        self.logger.debug(self.executor_identity() + message, *args, **kwargs)

    #--------------------------------------------------------------------------
    def info(self, message, *args, **kwargs):
        self.logger.info(self.executor_identity() + message, *args, **kwargs)

    #--------------------------------------------------------------------------
    def error(self, message, *args, **kwargs):
        self.logger.error(self.executor_identity() + message, *args, **kwargs)

    #--------------------------------------------------------------------------
    def warning(self, message, *args, **kwargs):
        self.logger.warning(self.executor_identity() + message, *args, **kwargs)

    #--------------------------------------------------------------------------
    def critical(self, message, *args, **kwargs):
        self.logger.critical(self.executor_identity() + message, *args, **kwargs)

    #--------------------------------------------------------------------------
    # def __getattribute__(self, name):
    #     return getattr(self.logger, name)
#------------------------------------------------------------------------------
def setup_logger(app_name, config, local_unused, args_unused):
    """Build and return the app's logger wrapped in a LoggerWrapper.

    Used as a configman aggregation; attaches a stderr handler and a
    syslog handler according to the config.logging namespace.

    parameters:
        app_name - name passed to logging.getLogger and substituted for
                   the '{app_name}' token in format strings
        config - configuration containing the 'logging' namespace
        local_unused, args_unused - ignored (aggregation call signature)
    """
    logger = logging.getLogger(app_name)
    # if this is a restart, loggers must be removed before being recreated
    tear_down_logger(app_name)
    logger.setLevel(logging.DEBUG)
    stderr_log = logging.StreamHandler()
    stderr_log.setLevel(config.logging.stderr_error_logging_level)
    stderr_format = config.logging.stderr_line_format_string.replace(
        '{app_name}',
        app_name
    )
    # format strings are written '{field}' style; _convert_format_string
    # turns them into the '%(field)s' style logging.Formatter expects
    stderr_log_formatter = logging.Formatter(
        _convert_format_string(stderr_format)
    )
    stderr_log.setFormatter(stderr_log_formatter)
    logger.addHandler(stderr_log)
    syslog = logging.handlers.SysLogHandler(
        facility=config.logging.syslog_facility_string
    )
    syslog.setLevel(config.logging.syslog_error_logging_level)
    syslog_format = config.logging.syslog_line_format_string.replace(
        '{app_name}',
        app_name
    )
    syslog_formatter = logging.Formatter(
        _convert_format_string(syslog_format)
    )
    syslog.setFormatter(syslog_formatter)
    logger.addHandler(syslog)
    wrapped_logger = LoggerWrapper(logger, config)
    return wrapped_logger
#------------------------------------------------------------------------------
def tear_down_logger(app_name):
    """Detach every handler from the logger registered under *app_name*.

    Called before (re)configuring logging so a restart does not stack
    duplicate handlers on the same logger.
    """
    logger = logging.getLogger(app_name)
    # iterate over a snapshot: removeHandler mutates logger.handlers
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
#------------------------------------------------------------------------------
def _convert_format_string(s):
"""return '%(foo)s %(bar)s' if the input is '{foo} {bar}'"""
return re.sub('{(\w+)}', r'%(\1)s', s)
#------------------------------------------------------------------------------
# module-level flag: set True by respond_to_SIGHUP so that main() re-reads
# configuration and restarts the app loop; cleared by _do_main on each run
restart = True
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def main(
    initial_app,
    values_source_list=None,
    config_path=None,
    config_manager_cls=ConfigurationManager
):
    """Run *initial_app* via _do_main, restarting on SIGHUP.

    Loops for as long as the module-level 'restart' flag is set (by
    respond_to_SIGHUP); returns the exit code of the final run.
    """
    global restart
    restart = True
    while restart:
        # _do_main clears 'restart'; the SIGHUP handler sets it again
        app_exit_code = _do_main(
            initial_app,
            values_source_list,
            config_path,
            config_manager_cls
        )
    return app_exit_code
#------------------------------------------------------------------------------
# This _do_main function will load an application object, initialize it and
# then call its 'main' function
def _do_main(
    initial_app,
    values_source_list=None,
    config_path=None,
    config_manager_cls=ConfigurationManager
):
    """Load, configure and invoke one application; return its exit code.

    Clears the module-level 'restart' flag, builds the configman option
    definitions around *initial_app*, installs the SIGHUP handler, then
    runs whatever 'application' resolves to (class, module or function).
    """
    if values_source_list is None:
        values_source_list = [
            ConfigFileFutureProxy,
            environment,
            command_line
        ]
    global restart
    restart = False
    # NOTE(review): basestring is Python-2 only
    if isinstance(initial_app, basestring):
        initial_app = class_converter(initial_app)
    if config_path is None:
        default = './config'
        config_path = os.environ.get(
            'DEFAULT_SOCORRO_CONFIG_PATH',
            default
        )
        if config_path != default:
            # you tried to set it, then it must be a valid directory
            if not os.path.isdir(config_path):
                raise IOError('%s is not a valid directory' % config_path)
    # the only config parameter is a special one that refers to a class or
    # module that defines an application. In order to qualify, a class must
    # have a constructor that accepts a DotDict derivative as the sole
    # input parameter. It must also have a 'main' function that accepts no
    # parameters. For a module to be acceptable, it must have a main
    # function that accepts a DotDict derivative as its input parameter.
    app_definition = Namespace()
    app_definition.add_option(
        'application',
        doc='the fully qualified module or class of the application',
        default=initial_app,
        from_string_converter=class_converter
    )
    try:
        app_name = initial_app.app_name  # this will be used as the default
        app_version = initial_app.app_version
        app_description = initial_app.app_description
    except AttributeError as x:
        raise AppDetailMissingError(x)
    app_definition.add_aggregation(
        'logger',
        functools.partial(setup_logger, app_name)
    )
    definitions = (
        app_definition,
        logging_required_config(app_name)
    )
    config_manager = config_manager_cls(
        definitions,
        app_name=app_name,
        app_version=app_version,
        app_description=app_description,
        values_source_list=values_source_list,
        config_pathname=config_path
    )

    def fix_exit_code(code):
        # some apps don't return a code so you might get None
        # which isn't good enough to send to sys.exit()
        if code is None:
            return 0
        return code

    with config_manager.context() as config:
        # config.logger.config = config
        config.executor_identity = lambda: threading.currentThread().getName()
        config_manager.log_config(config.logger)
        # install the signal handler for SIGHUP to be the action defined in
        # 'respond_to_SIGHUP'
        respond_to_SIGHUP_with_logging = functools.partial(
            respond_to_SIGHUP,
            logger=config.logger
        )
        signal.signal(signal.SIGHUP, respond_to_SIGHUP_with_logging)
        # get the app class from configman. Why bother since we have it aleady
        # with the 'initial_app' name? In most cases initial_app == app,
        # it might not always be that way. The user always has the ability
        # to specify on the command line a new app class that will override
        # 'initial_app'.
        app = config.application
        if isinstance(app, type):
            # invocation of the app if the app_object was a class
            instance = app(config)
            instance.config_manager = config_manager
            return_code = fix_exit_code(instance.main())
        elif inspect.ismodule(app):
            # invocation of the app if the app_object was a module
            return_code = fix_exit_code(app.main(config))
        elif inspect.isfunction(app):
            # invocation of the app if the app_object was a function
            return_code = fix_exit_code(app(config))
        config.logger.info('done.')
        return return_code
    # NOTE(review): unreachable when any branch above matched; if none did,
    # return_code is unbound and a NameError fires before reaching here
    raise NotImplementedError("The app did not have a callable main function")
|
def backoff_generator(self):
    """Generate a series of integers used for the length of the sleep
    between retries. After exhausting the configured list it repeats
    the last value forever; this generator never raises StopIteration.
    """
    for delay in self.config.backoff_delays:
        yield delay
    # configured delays exhausted: keep yielding the final (longest) one
    while True:
        yield self.config.backoff_delays[-1]
class TransactionExecutorWithInfiniteBackoff(TransactionExecutor):
    """Transaction executor that retries failed transactions forever,
    sleeping a configurable, growing delay between attempts."""
    # back off times
    required_config = Namespace()
    required_config.add_option('backoff_delays',
                               default="10, 30, 60, 120, 300",
                               doc='delays in seconds between retries',
                               from_string_converter=string_to_list_of_ints)
    # wait_log_interval
    required_config.add_option('wait_log_interval',
                               default=10,
                               doc='seconds between log during retries')

    #--------------------------------------------------------------------------
    #--------------------------------------------------------------------------
    def responsive_sleep(self, seconds, wait_reason=''):
        """Sleep for the specified number of seconds, logging every
        'wait_log_interval' seconds with progress info."""
        # NOTE(review): xrange is Python-2 only
        for x in xrange(int(seconds)):
            if (self.config.wait_log_interval and
                    not x % self.config.wait_log_interval):
                self.config.logger.debug(
                    '%s: %dsec of %dsec' % (wait_reason, x, seconds)
                )
            # sleep a second at a time so quit requests are honored promptly
            self.quit_check()
            time.sleep(1.0)

    #--------------------------------------------------------------------------
    def __call__(self, function, *args, **kwargs):
        """execute a function within the context of a transaction"""
        for wait_in_seconds in self.backoff_generator():
            try:
                if self.do_quit_check:
                    self.quit_check()
                # self.db_conn_context_source is an instance of a
                # wrapper class on the actual connection driver
                with self.db_conn_context_source() as connection:
                    try:
                        result = function(connection, *args, **kwargs)
                        connection.commit()
                        return result
                    except:
                        # roll back on any error, then re-raise so the
                        # retry/propagation logic below can classify it
                        connection.rollback()
                        raise
            except DBApiUtilNonFatalBaseException:
                raise
            # NOTE(review): `except E, x` is Python-2-only syntax
            except self.db_conn_context_source.conditional_exceptions, x:
                # these exceptions may or may not be retriable
                # the test is for is a last ditch effort to see if
                # we can retry
                if not self.db_conn_context_source.is_operational_exception(x):
                    self.config.logger.critical(
                        'Unrecoverable %s transaction error',
                        self.connection_source_type,
                        exc_info=True
                    )
                    raise
                self.config.logger.critical(
                    '%s transaction error eligible for retry',
                    self.connection_source_type,
                    exc_info=True)
            except self.db_conn_context_source.operational_exceptions, x:
                self.config.logger.critical(
                    '%s transaction error eligible for retry',
                    self.connection_source_type,
                    exc_info=True)
                self.db_conn_context_source.force_reconnect()
            self.config.logger.debug(
                'retry in %s seconds' % wait_in_seconds
            )
            self.responsive_sleep(
                wait_in_seconds,
                'waiting for retry after failure in %s transaction' %
                self.connection_source_type,
            )
        raise
|
def responsive_sleep(self, seconds, wait_reason=''):
    """Sleep for the specified number of seconds, logging every
    'wait_log_interval' seconds with progress info.

    Sleeps one second at a time and calls quit_check() between sleeps so
    that shutdown requests are honored promptly.
    """
    # range() replaces the Python-2-only xrange(); iteration is identical
    for elapsed in range(int(seconds)):
        if (self.config.wait_log_interval and
                not elapsed % self.config.wait_log_interval):
            self.config.logger.debug(
                '%s: %dsec of %dsec' % (wait_reason, elapsed, seconds)
            )
        self.quit_check()
        time.sleep(1.0)
# back off times
required_config = Namespace()
required_config.add_option('backoff_delays',
default="10, 30, 60, 120, 300",
doc='delays in seconds between retries',
from_string_converter=string_to_list_of_ints)
# wait_log_interval
required_config.add_option('wait_log_interval',
default=10,
doc='seconds between log during retries')
#--------------------------------------------------------------------------
def backoff_generator(self):
"""Generate a series of integers used for the length of the sleep
between retries. It produces after exhausting the list, it repeats
the last value from the list forever. This generator will never raise
the StopIteration exception."""
for x in self.config.backoff_delays:
yield x
while True:
yield self.config.backoff_delays[-1]
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
    def __call__(self, function, *args, **kwargs):
        """execute a function within the context of a transaction

        Repeatedly attempts ``function(connection, *args, **kwargs)``,
        committing on success and rolling back on any failure, and sleeping
        for the next value from ``backoff_generator()`` between attempts.

        NOTE(review): leading whitespace was lost in this dump; the nesting
        below is reconstructed and should be verified against upstream
        crontabber/transaction_executor.py.
        """
        for wait_in_seconds in self.backoff_generator():
            try:
                if self.do_quit_check:
                    self.quit_check()
                # self.db_conn_context_source is an instance of a
                # wrapper class on the actual connection driver
                with self.db_conn_context_source() as connection:
                    try:
                        result = function(connection, *args, **kwargs)
                        connection.commit()
                        return result
                    except:
                        # undo the transaction before the error is
                        # classified by the handlers below
                        connection.rollback()
                        raise
            except DBApiUtilNonFatalBaseException:
                # explicitly non-fatal errors are surfaced, never retried
                raise
            except self.db_conn_context_source.conditional_exceptions, x:
                # these exceptions may or may not be retriable;
                # this test is a last-ditch effort to see if we can retry
                if not self.db_conn_context_source.is_operational_exception(x):
                    self.config.logger.critical(
                        'Unrecoverable %s transaction error',
                        self.connection_source_type,
                        exc_info=True
                    )
                    raise
                self.config.logger.critical(
                    '%s transaction error eligible for retry',
                    self.connection_source_type,
                    exc_info=True)
            except self.db_conn_context_source.operational_exceptions, x:
                self.config.logger.critical(
                    '%s transaction error eligible for retry',
                    self.connection_source_type,
                    exc_info=True)
            # a retriable failure was logged above: reset the connection and
            # wait out the backoff delay before the next loop iteration
            self.db_conn_context_source.force_reconnect()
            self.config.logger.debug(
                'retry in %s seconds' % wait_in_seconds
            )
            self.responsive_sleep(
                wait_in_seconds,
                'waiting for retry after failure in %s transaction' %
                self.connection_source_type,
            )
        raise  # NOTE(review): looks unreachable -- backoff_generator never ends; confirm placement against upstream
|
mozilla/crontabber | crontabber/mixins.py | as_backfill_cron_app | python | def as_backfill_cron_app(cls):
#----------------------------------------------------------------------
def main(self, function=None):
return super(cls, self).main(
function=function,
once=False,
)
cls.main = main
cls._is_backfill_app = True
return cls | a class decorator for Crontabber Apps. This decorator embues a CronApp
with the parts necessary to be a backfill CronApp. It adds a main method
that forces the base class to use a value of False for 'once'. That means
it will do the work of a backfilling app. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/mixins.py#L14-L28 | null | from functools import partial
import subprocess
from configman import RequiredConfig, Namespace, class_converter
#==============================================================================
# mixin decorators
#
# the functions found in this section are for modifying the BaseCronApp base
# class by adding features and/or behaviors. This replaces the previous
# technique of using multiple inheritance for mixins.
#==============================================================================
#==============================================================================
def with_transactional_resource(
    transactional_resource_class,
    resource_name,
    reference_value_from=None
):
    """a class decorator for Crontabber Apps.  This decorator will give access
    to a resource connection source.  Configuration will be automatically set
    up and the cron app can expect to have attributes:
        self.{resource_name}_connection_factory
        self.{resource_name}_transaction_executor
    available to use.

    Within the setup, the RequiredConfig structure gets set up like this:
        config.{resource_name}.{resource_name}_class = \
            transactional_resource_class
        config.{resource_name}.{resource_name}_transaction_executor_class = \
            'crontabber.transaction_executor.TransactionExecutor'

    parameters:
        transactional_resource_class - a string representing the full path of
            the class that represents a connection to the resource.  An
            example is "crontabber.connection_factory.ConnectionFactory".
        resource_name - a string that will serve as an identifier for this
            resource within the mixin.  For example, if the resource is
            'database' we'll see configman namespace in the cron job section
            of "...class-SomeCronJob.database.database_connection_class" and
            "...class-SomeCronJob.database.transaction_executor_class"
        reference_value_from - optional configman cross-reference namespace
            (e.g. 'resource.postgresql') from which defaults are pulled
    """
    def class_decorator(cls):
        # this decorator only makes sense on configman-aware classes
        if not issubclass(cls, RequiredConfig):
            raise Exception(
                '%s must have RequiredConfig as a base class' % cls
            )
        # graft the resource's config options onto the decorated class
        new_req = cls.get_required_config()
        new_req.namespace(resource_name)
        new_req[resource_name].add_option(
            '%s_class' % resource_name,
            default=transactional_resource_class,
            from_string_converter=class_converter,
            reference_value_from=reference_value_from,
        )
        new_req[resource_name].add_option(
            '%s_transaction_executor_class' % resource_name,
            default='crontabber.transaction_executor.TransactionExecutor',
            doc='a class that will execute transactions',
            from_string_converter=class_converter,
            reference_value_from=reference_value_from
        )
        cls.required_config = new_req

        #------------------------------------------------------------------
        def new__init__(self, *args, **kwargs):
            # instantiate the connection class for the resource
            super(cls, self).__init__(*args, **kwargs)
            setattr(
                self,
                "%s_connection_factory" % resource_name,
                self.config[resource_name]['%s_class' % resource_name](
                    self.config[resource_name]
                )
            )
            # instantiate a transaction executor bound to the
            # resource connection
            setattr(
                self,
                "%s_transaction_executor" % resource_name,
                self.config[resource_name][
                    '%s_transaction_executor_class' % resource_name
                ](
                    self.config[resource_name],
                    getattr(self, "%s_connection_factory" % resource_name)
                )
            )
        if hasattr(cls, '__init__'):
            # NOTE(review): hasattr(cls, '__init__') is always true (every
            # class inherits object.__init__), so this branch always runs and
            # the 'else' arm below appears dead -- confirm before relying on it
            original_init = cls.__init__

            def both_inits(self, *args, **kwargs):
                # run the resource setup first, then the class' own __init__
                new__init__(self, *args, **kwargs)
                return original_init(self, *args, **kwargs)
            cls.__init__ = both_inits
        else:
            cls.__init__ = new__init__
        return cls
    return class_decorator
#==============================================================================
def with_resource_connection_as_argument(resource_name):
    """a class decorator for Crontabber Apps.  Adds a ``_run_proxy`` method
    that obtains a connection from the app's
    ``{resource_name}_connection_factory`` (a context manager) and passes it
    to the app's ``run`` method, closing the connection when ``run`` returns
    or raises.

    Must be combined with the ``with_transactional_resource`` decorator (or
    an equivalent) that installs the connection-factory attribute.
    """
    factory_attribute = '%s_connection_factory' % resource_name

    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            connection_factory = getattr(self, factory_attribute)
            with connection_factory() as conn:
                try:
                    self.run(conn, *args, **kwargs)
                finally:
                    # always hand the connection back, success or failure
                    connection_factory.close_connection(conn, force=True)
        cls._run_proxy = _run_proxy
        return cls

    return class_decorator
#==============================================================================
def with_single_transaction(resource_name):
    """a class decorator for Crontabber Apps.  Adds a ``_run_proxy`` method
    that invokes the app's ``run`` method through the app's
    ``{resource_name}_transaction_executor``, so the whole ``run`` call is a
    single transaction: committed on a normal return, rolled back on any
    abnormal exit.

    Must be combined with the ``with_transactional_resource`` decorator (or
    an equivalent) that installs the transaction-executor attribute.
    """
    executor_attribute = "%s_transaction_executor" % resource_name

    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            executor = getattr(self, executor_attribute)
            executor(self.run, *args, **kwargs)
        cls._run_proxy = _run_proxy
        return cls

    return class_decorator
#==============================================================================
def with_subprocess(cls):
    """a class decorator for Crontabber Apps.  This decorator gives the
    CronApp a ``run_process`` method that runs an external command through
    the shell and captures its exit code and output.
    """
    def run_process(self, command, input=None):
        """
        Run the command and return a tuple of three things.

        1. exit code - an integer number
        2. stdout - all output that was sent to stdout
        3. stderr - all output that was sent to stderr
        """
        # a sequence is joined into one quoted shell string
        if isinstance(command, (tuple, list)):
            command = ' '.join('"%s"' % x for x in command)
        # NOTE(review): shell=True on a string-joined command is unsafe if
        # 'command' can carry untrusted input -- the '"%s"' quoting does not
        # neutralize embedded quotes or shell metacharacters
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = proc.communicate(input=input)
        return proc.returncode, out.strip(), err.strip()
    cls.run_process = run_process
    return cls
#==============================================================================
# dedicated postgresql mixins
#------------------------------------------------------------------------------
# adds attributes to the decorated class in the form:
#     self.database_connection_factory
#     self.database_transaction_executor
# when using this definition as a class decorator, it is necessary to use
# parentheses, as it is a function call:
#     @using_postgres()
#     class MyClass ...
using_postgres = partial(
    with_transactional_resource,
    'crontabber.connection_factory.ConnectionFactory',
    'database',
    'resource.postgresql'
)

#------------------------------------------------------------------------------
# adds a _run_proxy method that acquires a database connection and passes it
# to the class' "run" method; the connection (a context manager) is closed
# automatically when "run" completes.
# use with parentheses, as it is a function call:
#     @using_postgres()
#     @with_postgres_connection_as_argument()
#     class MyClass ...
with_postgres_connection_as_argument = partial(
    with_resource_connection_as_argument,
    'database'
)

#------------------------------------------------------------------------------
# adds a _run_proxy method that calls the class' "run" method inside a single
# database transaction, passing the connection in.  A normal return commits;
# an escaping exception rolls back.
# use with parentheses, as it is a function call:
#     @using_postgres()
#     @as_single_postgres_transaction()
#     class MyClass ...
as_single_postgres_transaction = partial(
    with_single_transaction,
    'database'
)

# backwards-compatible aliases for the older mixin names
with_postgres_transactions = using_postgres
with_single_postgres_transaction = as_single_postgres_transaction
mozilla/crontabber | crontabber/mixins.py | with_transactional_resource | python | def with_transactional_resource(
transactional_resource_class,
resource_name,
reference_value_from=None
):
def class_decorator(cls):
if not issubclass(cls, RequiredConfig):
raise Exception(
'%s must have RequiredConfig as a base class' % cls
)
new_req = cls.get_required_config()
new_req.namespace(resource_name)
new_req[resource_name].add_option(
'%s_class' % resource_name,
default=transactional_resource_class,
from_string_converter=class_converter,
reference_value_from=reference_value_from,
)
new_req[resource_name].add_option(
'%s_transaction_executor_class' % resource_name,
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from=reference_value_from
)
cls.required_config = new_req
#------------------------------------------------------------------
def new__init__(self, *args, **kwargs):
# instantiate the connection class for the resource
super(cls, self).__init__(*args, **kwargs)
setattr(
self,
"%s_connection_factory" % resource_name,
self.config[resource_name]['%s_class' % resource_name](
self.config[resource_name]
)
)
# instantiate a transaction executor bound to the
# resource connection
setattr(
self,
"%s_transaction_executor" % resource_name,
self.config[resource_name][
'%s_transaction_executor_class' % resource_name
](
self.config[resource_name],
getattr(self, "%s_connection_factory" % resource_name)
)
)
if hasattr(cls, '__init__'):
original_init = cls.__init__
def both_inits(self, *args, **kwargs):
new__init__(self, *args, **kwargs)
return original_init(self, *args, **kwargs)
cls.__init__ = both_inits
else:
cls.__init__ = new__init__
return cls
return class_decorator | a class decorator for Crontabber Apps. This decorator will give access
to a resource connection source. Configuration will be automatically set
up and the cron app can expect to have attributes:
self.{resource_name}_connection_factory
self.{resource_name}_transaction_executor
available to use.
Within the setup, the RequiredConfig structure gets set up like this:
config.{resource_name}.{resource_name}_class = \
transactional_resource_class
config.{resource_name}.{resource_name}_transaction_executor_class = \
'crontabber.transaction_executor.TransactionExecutor'
parameters:
transactional_resource_class - a string representing the full path of
the class that represents a connection to the resource. An example
is "crontabber.connection_factory.ConnectionFactory".
resource_name - a string that will serve as an identifier for this
resource within the mixin. For example, if the resource is
'database' we'll see configman namespace in the cron job section
of "...class-SomeCronJob.database.database_connection_class" and
"...class-SomeCronJob.database.transaction_executor_class" | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/mixins.py#L32-L114 | null | from functools import partial
import subprocess
from configman import RequiredConfig, Namespace, class_converter
#==============================================================================
# mixin decorators
#
# the functions found in this section are for modifying the BaseCronApp base
# class by adding features and/or behaviors. This replaces the previous
# technique of using multiple inheritance for mixins.
#==============================================================================
def as_backfill_cron_app(cls):
    """a class decorator for Crontabber Apps.  This decorator imbues a CronApp
    with the parts necessary to be a backfill CronApp.  It adds a main method
    that forces the base class to use a value of False for 'once'.  That
    means it will do the work of a backfilling app.
    """
    #----------------------------------------------------------------------
    def main(self, function=None):
        # delegate to the base class' main, pinning once=False so every
        # missed interval since the last success gets processed
        return super(cls, self).main(
            function=function,
            once=False,
        )
    cls.main = main
    # marker attribute used elsewhere to recognize backfill-capable apps
    cls._is_backfill_app = True
    return cls
#==============================================================================
#==============================================================================
def with_resource_connection_as_argument(resource_name):
    """a class decorator for Crontabber Apps.  This decorator gives a class a
    _run_proxy method that passes a database connection (a context manager)
    into the CronApp's run method.  The connection is closed automatically
    when the CronApp's run method ends, even if it raises.

    In order for this decorator to function properly, it must be used in
    conjunction with the "with_transactional_resource" decorator (or an
    equivalent) -- it depends on the connection-factory attribute that
    decorator installs.
    """
    connection_factory_attr_name = '%s_connection_factory' % resource_name
    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            factory = getattr(self, connection_factory_attr_name)
            with factory() as connection:
                try:
                    self.run(connection, *args, **kwargs)
                finally:
                    # always release the connection, success or failure
                    factory.close_connection(connection, force=True)
        cls._run_proxy = _run_proxy
        return cls
    return class_decorator
#==============================================================================
def with_single_transaction(resource_name):
    """a class decorator for Crontabber Apps.  This decorator gives a class
    a _run_proxy method that runs the CronApp's 'run' method through the
    app's transaction executor, which passes a database connection in.  If
    'run' exits normally the transaction is committed; any abnormal exit
    results in a rollback.

    In order for this decorator to function properly, it must be used in
    conjunction with the "with_transactional_resource" decorator (or an
    equivalent) -- it depends on the transaction-executor attribute that
    decorator installs.
    """
    transaction_executor_attr_name = "%s_transaction_executor" % resource_name
    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            # look up the executor installed by with_transactional_resource
            # and let it wrap self.run in a transaction
            getattr(self, transaction_executor_attr_name)(
                self.run,
                *args,
                **kwargs
            )
        cls._run_proxy = _run_proxy
        return cls
    return class_decorator
#==============================================================================
def with_subprocess(cls):
    """a class decorator for Crontabber Apps.  This decorator gives the
    CronApp a ``run_process`` method that runs an external command through
    the shell and captures its exit code and output.
    """
    def run_process(self, command, input=None):
        """
        Run the command and return a tuple of three things.

        1. exit code - an integer number
        2. stdout - all output that was sent to stdout
        3. stderr - all output that was sent to stderr
        """
        # a sequence is joined into one quoted shell string
        if isinstance(command, (tuple, list)):
            command = ' '.join('"%s"' % x for x in command)
        # NOTE(review): shell=True on a string-joined command is unsafe if
        # 'command' can carry untrusted input -- the '"%s"' quoting does not
        # neutralize embedded quotes or shell metacharacters
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = proc.communicate(input=input)
        return proc.returncode, out.strip(), err.strip()
    cls.run_process = run_process
    return cls
#==============================================================================
# dedicated postgresql mixins
#------------------------------------------------------------------------------
# this class decorator adds attributes to the class in the form:
# self.database_connection_factory
# self.database_transaction_executor
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
# @using_postgres()
# class MyClass ...
using_postgres = partial(
with_transactional_resource,
'crontabber.connection_factory.ConnectionFactory',
'database',
'resource.postgresql'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# acquire a database connection and then pass it to the invocation of the
# class' "run" method. Since the connection is in the form of a
# context manager, the connection will automatically be closed when "run"
# completes.
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
# @using_postgres()
# @with_postgres_connection_as_argument()
# class MyClass ...
with_postgres_connection_as_argument = partial(
with_resource_connection_as_argument,
'database'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# call the class' run method in the context of a database transaction. It
# passes the connection to the "run" function. When "run" completes without
# raising an exception, the transaction will be commited. An exception
# escaping the run function will result in a "rollback"
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
# @using_postgres()
# @as_single_postgres_transaction()
# class MyClass ...
as_single_postgres_transaction = partial(
with_single_transaction,
'database'
)
# backwards compatibility
with_postgres_transactions = using_postgres
with_single_postgres_transaction = as_single_postgres_transaction |
mozilla/crontabber | crontabber/mixins.py | with_resource_connection_as_argument | python | def with_resource_connection_as_argument(resource_name):
connection_factory_attr_name = '%s_connection_factory' % resource_name
def class_decorator(cls):
def _run_proxy(self, *args, **kwargs):
factory = getattr(self, connection_factory_attr_name)
with factory() as connection:
try:
self.run(connection, *args, **kwargs)
finally:
factory.close_connection(connection, force=True)
cls._run_proxy = _run_proxy
return cls
return class_decorator | a class decorator for Crontabber Apps. This decorator will a class a
_run_proxy method that passes a database connection as a context manager
into the CronApp's run method. The connection will automatically be closed
when the CronApp's run method ends.
In order for this decorator to function properly, it must be used in
conjunction with the previous decorator, "with_transactional_resource" or
equivalent. This decorator depends on the mechanisms added by that
decorator. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/mixins.py#L118-L141 | null | from functools import partial
import subprocess
from configman import RequiredConfig, Namespace, class_converter
#==============================================================================
# mixin decorators
#
# the functions found in this section are for modifying the BaseCronApp base
# class by adding features and/or behaviors. This replaces the previous
# technique of using multiple inheritance for mixins.
#==============================================================================
def as_backfill_cron_app(cls):
    """a class decorator for Crontabber Apps.  This decorator imbues a CronApp
    with the parts necessary to be a backfill CronApp.  It adds a main method
    that forces the base class to use a value of False for 'once'.  That
    means it will do the work of a backfilling app.
    """
    #----------------------------------------------------------------------
    def main(self, function=None):
        # delegate to the base class' main, pinning once=False so every
        # missed interval since the last success gets processed
        return super(cls, self).main(
            function=function,
            once=False,
        )
    cls.main = main
    # marker attribute used elsewhere to recognize backfill-capable apps
    cls._is_backfill_app = True
    return cls
#==============================================================================
def with_transactional_resource(
    transactional_resource_class,
    resource_name,
    reference_value_from=None
):
    """a class decorator for Crontabber Apps.  This decorator will give access
    to a resource connection source.  Configuration will be automatically set
    up and the cron app can expect to have attributes:
        self.{resource_name}_connection_factory
        self.{resource_name}_transaction_executor
    available to use.

    Within the setup, the RequiredConfig structure gets set up like this:
        config.{resource_name}.{resource_name}_class = \
            transactional_resource_class
        config.{resource_name}.{resource_name}_transaction_executor_class = \
            'crontabber.transaction_executor.TransactionExecutor'

    parameters:
        transactional_resource_class - a string representing the full path of
            the class that represents a connection to the resource.  An
            example is "crontabber.connection_factory.ConnectionFactory".
        resource_name - a string that will serve as an identifier for this
            resource within the mixin.  For example, if the resource is
            'database' we'll see configman namespace in the cron job section
            of "...class-SomeCronJob.database.database_connection_class" and
            "...class-SomeCronJob.database.transaction_executor_class"
        reference_value_from - optional configman cross-reference namespace
            (e.g. 'resource.postgresql') from which defaults are pulled
    """
    def class_decorator(cls):
        # this decorator only makes sense on configman-aware classes
        if not issubclass(cls, RequiredConfig):
            raise Exception(
                '%s must have RequiredConfig as a base class' % cls
            )
        # graft the resource's config options onto the decorated class
        new_req = cls.get_required_config()
        new_req.namespace(resource_name)
        new_req[resource_name].add_option(
            '%s_class' % resource_name,
            default=transactional_resource_class,
            from_string_converter=class_converter,
            reference_value_from=reference_value_from,
        )
        new_req[resource_name].add_option(
            '%s_transaction_executor_class' % resource_name,
            default='crontabber.transaction_executor.TransactionExecutor',
            doc='a class that will execute transactions',
            from_string_converter=class_converter,
            reference_value_from=reference_value_from
        )
        cls.required_config = new_req

        #------------------------------------------------------------------
        def new__init__(self, *args, **kwargs):
            # instantiate the connection class for the resource
            super(cls, self).__init__(*args, **kwargs)
            setattr(
                self,
                "%s_connection_factory" % resource_name,
                self.config[resource_name]['%s_class' % resource_name](
                    self.config[resource_name]
                )
            )
            # instantiate a transaction executor bound to the
            # resource connection
            setattr(
                self,
                "%s_transaction_executor" % resource_name,
                self.config[resource_name][
                    '%s_transaction_executor_class' % resource_name
                ](
                    self.config[resource_name],
                    getattr(self, "%s_connection_factory" % resource_name)
                )
            )
        if hasattr(cls, '__init__'):
            # NOTE(review): hasattr(cls, '__init__') is always true (every
            # class inherits object.__init__), so this branch always runs and
            # the 'else' arm below appears dead -- confirm before relying on it
            original_init = cls.__init__

            def both_inits(self, *args, **kwargs):
                # run the resource setup first, then the class' own __init__
                new__init__(self, *args, **kwargs)
                return original_init(self, *args, **kwargs)
            cls.__init__ = both_inits
        else:
            cls.__init__ = new__init__
        return cls
    return class_decorator
#==============================================================================
#==============================================================================
def with_single_transaction(resource_name):
    """a class decorator for Crontabber Apps.  This decorator gives a class
    a _run_proxy method that runs the CronApp's 'run' method through the
    app's transaction executor, which passes a database connection in.  If
    'run' exits normally the transaction is committed; any abnormal exit
    results in a rollback.

    In order for this decorator to function properly, it must be used in
    conjunction with the "with_transactional_resource" decorator (or an
    equivalent) -- it depends on the transaction-executor attribute that
    decorator installs.
    """
    transaction_executor_attr_name = "%s_transaction_executor" % resource_name
    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            # look up the executor installed by with_transactional_resource
            # and let it wrap self.run in a transaction
            getattr(self, transaction_executor_attr_name)(
                self.run,
                *args,
                **kwargs
            )
        cls._run_proxy = _run_proxy
        return cls
    return class_decorator
#==============================================================================
def with_subprocess(cls):
    """a class decorator for Crontabber Apps.  This decorator gives the
    CronApp a ``run_process`` method that runs an external command through
    the shell and captures its exit code and output.
    """
    def run_process(self, command, input=None):
        """
        Run the command and return a tuple of three things.

        1. exit code - an integer number
        2. stdout - all output that was sent to stdout
        3. stderr - all output that was sent to stderr
        """
        # a sequence is joined into one quoted shell string
        if isinstance(command, (tuple, list)):
            command = ' '.join('"%s"' % x for x in command)
        # NOTE(review): shell=True on a string-joined command is unsafe if
        # 'command' can carry untrusted input -- the '"%s"' quoting does not
        # neutralize embedded quotes or shell metacharacters
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = proc.communicate(input=input)
        return proc.returncode, out.strip(), err.strip()
    cls.run_process = run_process
    return cls
#==============================================================================
# dedicated postgresql mixins
#------------------------------------------------------------------------------
# this class decorator adds attributes to the class in the form:
# self.database_connection_factory
# self.database_transaction_executor
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
# @using_postgres()
# class MyClass ...
using_postgres = partial(
with_transactional_resource,
'crontabber.connection_factory.ConnectionFactory',
'database',
'resource.postgresql'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# acquire a database connection and then pass it to the invocation of the
# class' "run" method. Since the connection is in the form of a
# context manager, the connection will automatically be closed when "run"
# completes.
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
# @using_postgres()
# @with_postgres_connection_as_argument()
# class MyClass ...
with_postgres_connection_as_argument = partial(
with_resource_connection_as_argument,
'database'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# call the class' run method in the context of a database transaction. It
# passes the connection to the "run" function. When "run" completes without
# raising an exception, the transaction will be commited. An exception
# escaping the run function will result in a "rollback"
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
# @using_postgres()
# @as_single_postgres_transaction()
# class MyClass ...
as_single_postgres_transaction = partial(
with_single_transaction,
'database'
)
# backwards compatibility
with_postgres_transactions = using_postgres
with_single_postgres_transaction = as_single_postgres_transaction |
mozilla/crontabber | crontabber/mixins.py | with_single_transaction | python | def with_single_transaction(resource_name):
transaction_executor_attr_name = "%s_transaction_executor" % resource_name
def class_decorator(cls):
def _run_proxy(self, *args, **kwargs):
getattr(self, transaction_executor_attr_name)(
self.run,
*args,
**kwargs
)
cls._run_proxy = _run_proxy
return cls
return class_decorator | a class decorator for Crontabber Apps. This decorator will give a class
a _run_proxy method that passes a database connection as a context manager
into the CronApp's 'run' method. The run method may then use the
connection at will, knowing that if 'run' exits normally, the
connection will automatically be committed. Any abnormal exit from 'run'
will result in the connection being rolled back.
In order for this decorator to function properly, it must be used in
conjunction with the previous decorator, "with_transactional_resource" or
equivalent. This decorator depends on the mechanisms added by that
decorator. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/mixins.py#L145-L169 | null | from functools import partial
import subprocess
from configman import RequiredConfig, Namespace, class_converter
#==============================================================================
# mixin decorators
#
# the functions found in this section are for modifying the BaseCronApp base
# class by adding features and/or behaviors. This replaces the previous
# technique of using multiple inheritance for mixins.
#==============================================================================
def as_backfill_cron_app(cls):
    """a class decorator for Crontabber Apps.  This decorator imbues a CronApp
    with the parts necessary to be a backfill CronApp.  It adds a main method
    that forces the base class to use a value of False for 'once'.  That
    means it will do the work of a backfilling app.
    """
    #----------------------------------------------------------------------
    def main(self, function=None):
        # delegate to the base class' main, pinning once=False so every
        # missed interval since the last success gets processed
        return super(cls, self).main(
            function=function,
            once=False,
        )
    cls.main = main
    # marker attribute used elsewhere to recognize backfill-capable apps
    cls._is_backfill_app = True
    return cls
#==============================================================================
def with_transactional_resource(
    transactional_resource_class,
    resource_name,
    reference_value_from=None
):
    """a class decorator for Crontabber Apps.  This decorator will give access
    to a resource connection source.  Configuration will be automatically set
    up and the cron app can expect to have attributes:
        self.{resource_name}_connection_factory
        self.{resource_name}_transaction_executor
    available to use.

    Within the setup, the RequiredConfig structure gets set up like this:
        config.{resource_name}.{resource_name}_class = \
            transactional_resource_class
        config.{resource_name}.{resource_name}_transaction_executor_class = \
            'crontabber.transaction_executor.TransactionExecutor'

    parameters:
        transactional_resource_class - a string representing the full path of
            the class that represents a connection to the resource.  An
            example is "crontabber.connection_factory.ConnectionFactory".
        resource_name - a string that will serve as an identifier for this
            resource within the mixin.  For example, if the resource is
            'database' we'll see configman namespace in the cron job section
            of "...class-SomeCronJob.database.database_connection_class" and
            "...class-SomeCronJob.database.transaction_executor_class"
        reference_value_from - optional configman cross-reference namespace
            (e.g. 'resource.postgresql') from which defaults are pulled
    """
    def class_decorator(cls):
        # this decorator only makes sense on configman-aware classes
        if not issubclass(cls, RequiredConfig):
            raise Exception(
                '%s must have RequiredConfig as a base class' % cls
            )
        # graft the resource's config options onto the decorated class
        new_req = cls.get_required_config()
        new_req.namespace(resource_name)
        new_req[resource_name].add_option(
            '%s_class' % resource_name,
            default=transactional_resource_class,
            from_string_converter=class_converter,
            reference_value_from=reference_value_from,
        )
        new_req[resource_name].add_option(
            '%s_transaction_executor_class' % resource_name,
            default='crontabber.transaction_executor.TransactionExecutor',
            doc='a class that will execute transactions',
            from_string_converter=class_converter,
            reference_value_from=reference_value_from
        )
        cls.required_config = new_req

        #------------------------------------------------------------------
        def new__init__(self, *args, **kwargs):
            # instantiate the connection class for the resource
            super(cls, self).__init__(*args, **kwargs)
            setattr(
                self,
                "%s_connection_factory" % resource_name,
                self.config[resource_name]['%s_class' % resource_name](
                    self.config[resource_name]
                )
            )
            # instantiate a transaction executor bound to the
            # resource connection
            setattr(
                self,
                "%s_transaction_executor" % resource_name,
                self.config[resource_name][
                    '%s_transaction_executor_class' % resource_name
                ](
                    self.config[resource_name],
                    getattr(self, "%s_connection_factory" % resource_name)
                )
            )
        if hasattr(cls, '__init__'):
            # NOTE(review): hasattr(cls, '__init__') is always true (every
            # class inherits object.__init__), so this branch always runs and
            # the 'else' arm below appears dead -- confirm before relying on it
            original_init = cls.__init__

            def both_inits(self, *args, **kwargs):
                # run the resource setup first, then the class' own __init__
                new__init__(self, *args, **kwargs)
                return original_init(self, *args, **kwargs)
            cls.__init__ = both_inits
        else:
            cls.__init__ = new__init__
        return cls
    return class_decorator
#==============================================================================
def with_resource_connection_as_argument(resource_name):
    """Class decorator factory: pass a live resource connection to ``run``.

    The decorated class gains a ``_run_proxy`` method that looks up
    ``self.<resource_name>_connection_factory`` (installed by
    ``with_transactional_resource``), opens a connection via its context
    manager, hands it to ``self.run`` and always force-closes it afterwards.
    """
    factory_attribute = '%s_connection_factory' % resource_name

    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            connection_factory = getattr(self, factory_attribute)
            with connection_factory() as connection:
                try:
                    self.run(connection, *args, **kwargs)
                finally:
                    # close unconditionally, even if run() raised
                    connection_factory.close_connection(connection, force=True)

        cls._run_proxy = _run_proxy
        return cls

    return class_decorator
#==============================================================================
#==============================================================================
def with_subprocess(cls):
    """a class decorator for Crontabber Apps. This decorator gives the CronApp
    a run_process method with which it can invoke an external command in a
    subprocess and collect its exit code and output.
    """
    def run_process(self, command, input=None):
        """
        Run the command and return a tuple of three things.
        1. exit code - an integer number
        2. stdout - all output that was sent to stdout (stripped)
        3. stderr - all output that was sent to stderr (stripped)

        `command` may be a string or a sequence; a sequence is quoted and
        joined into a single shell command line.
        """
        if isinstance(command, (tuple, list)):
            # quote each element so embedded spaces survive the shell
            command = ' '.join('"%s"' % x for x in command)
        # NOTE(review): shell=True means `command` must only ever come from
        # trusted configuration, never from untrusted input.
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = proc.communicate(input=input)
        return proc.returncode, out.strip(), err.strip()
    cls.run_process = run_process
    return cls
#==============================================================================
# dedicated postgresql mixins
#------------------------------------------------------------------------------
# this class decorator adds attributes to the class in the form:
#     self.database_connection_factory
#     self.database_transaction_executor
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
#     @using_postgres()
#     class MyClass ...
using_postgres = partial(
    with_transactional_resource,
    'crontabber.connection_factory.ConnectionFactory',
    'database',
    'resource.postgresql'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# acquire a database connection and then pass it to the invocation of the
# class' "run" method.  Since the connection is in the form of a
# context manager, the connection will automatically be closed when "run"
# completes.
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
#     @using_postgres()
#     @with_postgres_connection_as_argument()
#     class MyClass ...
with_postgres_connection_as_argument = partial(
    with_resource_connection_as_argument,
    'database'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# call the class' run method in the context of a database transaction.  It
# passes the connection to the "run" function.  When "run" completes without
# raising an exception, the transaction will be commited.  An exception
# escaping the run function will result in a "rollback"
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
#     @using_postgres()
#     @as_single_postgres_transaction()
#     class MyClass ...
as_single_postgres_transaction = partial(
    with_single_transaction,
    'database'
)
# backwards compatibility: old names kept as aliases so existing cron apps
# that import them keep working
with_postgres_transactions = using_postgres
with_single_postgres_transaction = as_single_postgres_transaction
mozilla/crontabber | crontabber/mixins.py | with_subprocess | python | def with_subprocess(cls):
def run_process(self, command, input=None):
"""
Run the command and return a tuple of three things.
1. exit code - an integer number
2. stdout - all output that was sent to stdout
2. stderr - all output that was sent to stderr
"""
if isinstance(command, (tuple, list)):
command = ' '.join('"%s"' % x for x in command)
proc = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = proc.communicate(input=input)
return proc.returncode, out.strip(), err.strip()
cls.run_process = run_process
return cls | a class decorator for Crontabber Apps. This decorator gives the CronApp
a _run_proxy method that will execute the cron app as a single PG
transaction. Commit and Rollback are automatic. The cron app should do
no transaction management of its own. The cron app should be short so that
the transaction is not held open too long. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/mixins.py#L173-L201 | null | from functools import partial
import subprocess
from configman import RequiredConfig, Namespace, class_converter
#==============================================================================
# mixin decorators
#
# the functions found in this section are for modifying the BaseCronApp base
# class by adding features and/or behaviors. This replaces the previous
# technique of using multiple inheritance for mixins.
#==============================================================================
def as_backfill_cron_app(cls):
    """Class decorator marking a CronApp as a backfilling app.

    The decorated class receives a ``main`` that always invokes the
    inherited ``main`` with ``once=False``, plus the marker attribute
    ``_is_backfill_app = True``.
    """
    def main(self, function=None):
        # once=False instructs the base class to do backfilling work
        return super(cls, self).main(function=function, once=False)

    cls.main = main
    cls._is_backfill_app = True
    return cls
#==============================================================================
def with_transactional_resource(
    transactional_resource_class,
    resource_name,
    reference_value_from=None
):
    """a class decorator for Crontabber Apps. This decorator will give access
    to a resource connection source. Configuration will be automatically set
    up and the cron app can expect to have attributes:
        self.{resource_name}_connection_factory
        self.{resource_name}_transaction_executor
    available to use.
    Within the setup, the RequiredConfig structure gets set up like this:
        config.{resource_name}.{resource_name}_class = \
            transactional_resource_class
        config.{resource_name}.{resource_name}_transaction_executor_class = \
            'crontabber.transaction_executor.TransactionExecutor'
    parameters:
        transactional_resource_class - a string representing the full path of
            the class that represents a connection to the resource. An example
            is "crontabber.connection_factory.ConnectionFactory".
        resource_name - a string that will serve as an identifier for this
            resource within the mixin. For example, if the resource is
            'database' we'll see configman namespace in the cron job section
            of "...class-SomeCronJob.database.database_connection_class" and
            "...class-SomeCronJob.database.transaction_executor_class"
    """
    def class_decorator(cls):
        # the decorated class must participate in configman's config system
        if not issubclass(cls, RequiredConfig):
            raise Exception(
                '%s must have RequiredConfig as a base class' % cls
            )
        # graft a sub-namespace with the resource's two options onto the
        # class's required configuration
        new_req = cls.get_required_config()
        new_req.namespace(resource_name)
        new_req[resource_name].add_option(
            '%s_class' % resource_name,
            default=transactional_resource_class,
            from_string_converter=class_converter,
            reference_value_from=reference_value_from,
        )
        new_req[resource_name].add_option(
            '%s_transaction_executor_class' % resource_name,
            default='crontabber.transaction_executor.TransactionExecutor',
            doc='a class that will execute transactions',
            from_string_converter=class_converter,
            reference_value_from=reference_value_from
        )
        cls.required_config = new_req

        #------------------------------------------------------------------
        def new__init__(self, *args, **kwargs):
            # instantiate the connection class for the resource and hang it
            # on the instance as <resource_name>_connection_factory
            super(cls, self).__init__(*args, **kwargs)
            setattr(
                self,
                "%s_connection_factory" % resource_name,
                self.config[resource_name]['%s_class' % resource_name](
                    self.config[resource_name]
                )
            )
            # instantiate a transaction executor bound to the
            # resource connection
            setattr(
                self,
                "%s_transaction_executor" % resource_name,
                self.config[resource_name][
                    '%s_transaction_executor_class' % resource_name
                ](
                    self.config[resource_name],
                    getattr(self, "%s_connection_factory" % resource_name)
                )
            )
        # NOTE(review): hasattr(cls, '__init__') is always true for
        # new-style classes (object defines __init__), so the else branch
        # below appears unreachable -- confirm before relying on it.
        if hasattr(cls, '__init__'):
            original_init = cls.__init__

            # chain the resource setup in front of the class's own __init__
            def both_inits(self, *args, **kwargs):
                new__init__(self, *args, **kwargs)
                return original_init(self, *args, **kwargs)
            cls.__init__ = both_inits
        else:
            cls.__init__ = new__init__
        return cls
    return class_decorator
#==============================================================================
def with_resource_connection_as_argument(resource_name):
    """Class decorator factory: hand a resource connection to ``run``.

    Installs a ``_run_proxy`` that obtains a connection from
    ``self.<resource_name>_connection_factory`` (set up by
    ``with_transactional_resource``), passes it into ``self.run`` and
    force-closes it when ``run`` returns or raises.
    """
    factory_attribute = '%s_connection_factory' % resource_name

    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            connection_factory = getattr(self, factory_attribute)
            with connection_factory() as connection:
                try:
                    self.run(connection, *args, **kwargs)
                finally:
                    # close even when run() raised
                    connection_factory.close_connection(connection, force=True)

        cls._run_proxy = _run_proxy
        return cls

    return class_decorator
#==============================================================================
def with_single_transaction(resource_name):
    """Class decorator factory: run the CronApp as a single transaction.

    Installs a ``_run_proxy`` that hands ``self.run`` to
    ``self.<resource_name>_transaction_executor`` (set up by
    ``with_transactional_resource``).  The executor supplies the
    connection, committing when ``run`` returns normally and rolling back
    when it raises.
    """
    executor_attribute = "%s_transaction_executor" % resource_name

    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            executor = getattr(self, executor_attribute)
            executor(self.run, *args, **kwargs)

        cls._run_proxy = _run_proxy
        return cls

    return class_decorator
#==============================================================================
#==============================================================================
# dedicated postgresql mixins
#------------------------------------------------------------------------------
# this class decorator adds attributes to the class in the form:
#     self.database_connection_factory
#     self.database_transaction_executor
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
#     @using_postgres()
#     class MyClass ...
using_postgres = partial(
    with_transactional_resource,
    'crontabber.connection_factory.ConnectionFactory',
    'database',
    'resource.postgresql'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# acquire a database connection and then pass it to the invocation of the
# class' "run" method.  Since the connection is in the form of a
# context manager, the connection will automatically be closed when "run"
# completes.
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
#     @using_postgres()
#     @with_postgres_connection_as_argument()
#     class MyClass ...
with_postgres_connection_as_argument = partial(
    with_resource_connection_as_argument,
    'database'
)
#------------------------------------------------------------------------------
# this class decorator adds a _run_proxy method to the class that will
# call the class' run method in the context of a database transaction.  It
# passes the connection to the "run" function.  When "run" completes without
# raising an exception, the transaction will be commited.  An exception
# escaping the run function will result in a "rollback"
# when using this definition as a class decorator, it is necessary to use
# parenthesis as it is a function call:
#     @using_postgres()
#     @as_single_postgres_transaction()
#     class MyClass ...
as_single_postgres_transaction = partial(
    with_single_transaction,
    'database'
)
# backwards compatibility: old names kept as aliases so existing cron apps
# that import them keep working
with_postgres_transactions = using_postgres
with_single_postgres_transaction = as_single_postgres_transaction
def classes_in_namespaces_converter_with_compression(
    reference_namespace={},
    template_for_namespace="class-%(name)s",
    list_splitter_fn=_default_list_splitter,
    class_extractor=_default_class_extractor,
    extra_extractor=_default_extra_extractor
):
    """Build a configman from_string_converter for the 'jobs' class list.

    parameters:
        reference_namespace - options shared across all classes; any option
            already present here is not duplicated into the per-class
            namespaces.  (It is only read here, never written, so the
            shared mutable default is safe -- but fragile.)
        template_for_namespace - a template for the names of the namespaces
            that will contain the classes and their associated required
            config options.  There are two template variables available:
            %(name)s - the name of the class to be contained in the
            namespace; %(index)d - the sequential index number of the
            namespace.
        list_splitter_fn - a function that will take the string list of
            classes and break it up into a sequence of individual elements
        class_extractor - a function that will return the string version of
            a classname from the result of the list_splitter_fn
        extra_extractor - a function that will return a Namespace of options
            created from any extra information associated with the classes
            returned by the list_splitter_fn
    """
    # -------------------------------------------------------------------------
    def class_list_converter(class_list_str):
        """This function becomes the actual converter used by configman to
        take a string and convert it into the nested sequence of Namespaces,
        one for each class in the list.  It does this by creating a proxy
        class stuffed with its own 'required_config' that's dynamically
        generated."""
        if isinstance(class_list_str, basestring):
            class_str_list = list_splitter_fn(class_list_str)
        else:
            raise TypeError('must be derivative of a basestring')

        # =====================================================================
        class InnerClassList(RequiredConfig):
            """This nested class is a proxy list for the classes.  It collects
            all the config requirements for the listed classes and places them
            each into their own Namespace.
            """
            # we're dynamically creating a class here.  The following block of
            # code is actually adding class level attributes to this new class

            # 1st requirement for configman
            required_config = Namespace()
            # to help the programmer know what Namespaces we added
            subordinate_namespace_names = []
            # save the template for future reference
            namespace_template = template_for_namespace
            # for display
            original_input = class_list_str.replace('\n', '\\n')
            # for each class in the class list
            class_list = []
            for namespace_index, class_list_element in enumerate(
                class_str_list
            ):
                try:
                    a_class = class_converter(
                        class_extractor(class_list_element)
                    )
                except CannotConvertError:
                    # translate the generic conversion failure into the
                    # crontabber-specific "no such job" error
                    raise JobNotFoundError(class_list_element)
                class_list.append((a_class.__name__, a_class))
                # figure out the Namespace name
                namespace_name_dict = {
                    'name': a_class.__name__,
                    'index': namespace_index
                }
                namespace_name = template_for_namespace % namespace_name_dict
                subordinate_namespace_names.append(namespace_name)
                # create the new Namespace
                required_config.namespace(namespace_name)
                a_class_namespace = required_config[namespace_name]
                # add options for the 'extra data'
                try:
                    extra_options = extra_extractor(class_list_element)
                    a_class_namespace.update(extra_options)
                except NotImplementedError:
                    pass
                # add options for the class's required config, skipping
                # anything the reference (common) namespace already defines
                try:
                    for k, v in a_class.get_required_config().iteritems():
                        if k not in reference_namespace:
                            a_class_namespace[k] = v
                except AttributeError:  # a_class has no get_required_config
                    pass

            @classmethod
            def to_str(cls):
                """this method takes this inner class object and turns it back
                into the original string of classnames.  This is used
                primarily as for the output of the 'help' option"""
                return cls.original_input

        return InnerClassList  # result of class_list_converter
    return class_list_converter
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
CronTabber is a configman app for executing cron jobs.
"""
import re
import datetime
import inspect
import json
import sys
import time
import traceback
from functools import partial
from psycopg2 import OperationalError, IntegrityError
from dbapi2_util import (
single_value_sql,
SQLDidNotReturnSingleValue,
execute_query_iter,
execute_query_fetchall,
single_row_sql,
SQLDidNotReturnSingleRow,
execute_no_results,
)
from generic_app import App, main
from datetimeutil import utc_now, timesince
from base import (
convert_frequency,
FrequencyDefinitionError,
reorder_dag
)
try:
import raven
except ImportError: # pragma: no cover
raven = None
from configman import Namespace, RequiredConfig
from configman.converters import class_converter, CannotConvertError
from crontabber import __version__
CREATE_CRONTABBER_SQL = """
CREATE TABLE crontabber (
app_name text NOT NULL,
next_run timestamp with time zone,
first_run timestamp with time zone,
last_run timestamp with time zone,
last_success timestamp with time zone,
ongoing timestamp with time zone,
error_count integer DEFAULT 0,
depends_on text[],
last_error json
);
"""
CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX = """
CREATE UNIQUE INDEX crontabber_unique_app_name_idx
ON crontabber (app_name);
"""
CREATE_CRONTABBER_LOG_SQL = """
CREATE TABLE crontabber_log (
id SERIAL NOT NULL,
app_name text NOT NULL,
log_time timestamp with time zone DEFAULT now() NOT NULL,
duration interval,
success timestamp with time zone,
exc_type text,
exc_value text,
exc_traceback text
);
"""
# a method decorator that indicates that the method defines a single transacton
# on a database connection. It invokes the method using the instance's
# transaction object, automatically passing in the appropriate database
# connection. Any abnormal exit from the method will result in a 'rollback'
# any normal exit will result in a 'commit'
def database_transaction(transaction_object_name='transaction_executor'):
    """Method decorator: run the method as one database transaction.

    The wrapped method is invoked through the instance attribute named by
    `transaction_object_name`, which supplies the database connection as
    the first positional argument after `self`; the executor decides when
    to commit or roll back.  The executor's return value is passed through.
    """
    def transaction_decorator(method):
        def _do_transaction(self, *args, **kwargs):
            executor = getattr(self, transaction_object_name)
            return executor(partial(method, self), *args, **kwargs)
        return _do_transaction
    return transaction_decorator
class JobNotFoundError(Exception):
    """Raised when a job class listed in configuration cannot be imported."""
    pass


class TimeDefinitionError(Exception):
    """Raised by check_time() for values that are not valid 'HH:MM' times."""
    pass


class JobDescriptionError(Exception):
    """Raised when a job description lacks its '|frequency|time' metadata."""
    pass


class BrokenJSONError(ValueError):
    # NOTE(review): not raised in this portion of the module; the name
    # suggests stored JSON that fails to parse -- confirm usage elsewhere.
    pass


class SentryConfigurationError(Exception):
    """When Sentry isn't configured correctly"""


class RowLevelLockError(OperationalError):
    """The reason for defining this exception is that when you attempt
    to read from a row that is actively locked (by another
    thread/process) is that you get an OperationalError which isn't
    particular developer-friendly because it might look like there's
    some other more fundamental error such as a bad network connection
    or something wrong with the credentials.
    By giving it a name, it's more clear in the crontabber_log what
    exactly was the reason why that second thread/process couldn't
    work on that row a simultaneously.
    """
    pass


class OngoingJobError(Exception):
    """Raised when you basically tried to run a job that already
    ongoing. This is the "high level" version of `RowLevelLockError`.
    """
    pass


# sentinel default for JobStateDatabase.pop() so callers can pass
# default=None and still distinguish it from "no default supplied"
_marker = object()
class JobStateDatabase(RequiredConfig):
    """Dict-like access to crontabber's per-job scheduling state.

    Keys are app_names; values are dicts of that job's state columns
    (next_run, first_run, last_run, last_success, depends_on, error_count,
    last_error, ongoing).  Every read and write goes straight to the
    `crontabber` PostgreSQL table -- nothing is cached in memory.
    Construction creates/migrates the backing tables and index if missing.
    """
    required_config = Namespace()
    required_config.add_option(
        'database_class',
        default='crontabber.connection_factory.ConnectionFactory',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )
    required_config.add_option(
        'transaction_executor_class',
        default='crontabber.transaction_executor.TransactionExecutor',
        doc='a class that will execute transactions',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )

    def __init__(self, config=None):
        """Bind to the database and make sure the schema exists.

        Creates the `crontabber` and `crontabber_log` tables, migrates in
        the later-added `ongoing` column, and creates the unique app_name
        index when any of them is found missing.
        """
        self.config = config
        self.database_connection_factory = config.database_class(config)
        self.transaction_executor = self.config.transaction_executor_class(
            self.config,
            self.database_connection_factory
        )
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_SQL
            )
        else:
            # The table exists; check that it has the new `ongoing` column.
            try:
                self.transaction_executor(
                    single_value_sql,
                    "SELECT column_name FROM information_schema.columns "
                    "WHERE table_name='crontabber' AND column_name='ongoing'"
                )
            except SQLDidNotReturnSingleValue:
                # The column is missing, so do a quick in-place migration.
                self.config.logger.info(
                    "Have to do a migration and add the `ongoing` field"
                )
                self.transaction_executor(
                    execute_no_results,
                    "ALTER TABLE crontabber ADD ongoing TIMESTAMP "
                    "WITH TIME ZONE"
                )
        # check that we have set the unique index on the app_name
        index_count, = self.transaction_executor(
            single_row_sql,
            "SELECT COUNT(1) FROM pg_indexes WHERE "
            "indexname = 'crontabber_unique_app_name_idx'"
        )
        if not index_count:
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX
            )
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber_log'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber_log"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_LOG_SQL
            )

    def has_data(self):
        """Return True when at least one job row is stored."""
        return bool(self.transaction_executor(
            single_value_sql,
            "SELECT COUNT(*) FROM crontabber"
        ))

    def __iter__(self):
        """Iterate over all stored app_names."""
        return iter([
            record[0] for record in
            self.transaction_executor(
                execute_query_fetchall,
                "SELECT app_name FROM crontabber"
            )
        ])

    def __contains__(self, key):
        """return True if we have a job by this key"""
        try:
            self.transaction_executor(
                single_value_sql,
                """SELECT app_name
                FROM crontabber
                WHERE
                    app_name = %s""",
                (key,)
            )
            return True
        except SQLDidNotReturnSingleValue:
            return False

    def keys(self):
        """return a list of all app_names"""
        keys = []
        for app_name, __ in self.items():
            keys.append(app_name)
        return keys

    def items(self):
        """return all the app_names and their values as tuples"""
        # NOTE(review): unlike __getitem__, this query does not select the
        # `ongoing` column -- confirm whether that omission is intentional.
        sql = """
            SELECT
                app_name,
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error
            FROM crontabber"""
        columns = (
            'app_name',
            'next_run', 'first_run', 'last_run', 'last_success',
            'depends_on', 'error_count', 'last_error'
        )
        items = []
        for record in self.transaction_executor(execute_query_fetchall, sql):
            row = dict(zip(columns, record))
            items.append((row.pop('app_name'), row))
        return items

    def values(self):
        """return a list of all state values"""
        values = []
        for __, data in self.items():
            values.append(data)
        return values

    def __getitem__(self, key):
        """return the job info or raise a KeyError"""
        sql = """
            SELECT
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error,
                ongoing
            FROM crontabber
            WHERE
                app_name = %s"""
        columns = (
            'next_run', 'first_run', 'last_run', 'last_success',
            'depends_on', 'error_count', 'last_error', 'ongoing'
        )
        try:
            record = self.transaction_executor(single_row_sql, sql, (key,))
        except SQLDidNotReturnSingleRow:
            raise KeyError(key)
        row = dict(zip(columns, record))
        return row

    @database_transaction()
    def __setitem__(self, connection, key, value):
        """Insert or update one job row inside a single transaction.

        The row is first locked with FOR UPDATE NOWAIT so two crontabber
        processes cannot work on the same job at once; a lock failure (or
        a duplicate insert racing past the existence check) surfaces as
        RowLevelLockError.
        """
        class LastErrorEncoder(json.JSONEncoder):
            # last_error may contain an exception class; serialize class
            # objects by their repr() since JSON has no type literal
            def default(self, obj):
                if isinstance(obj, type):
                    return repr(obj)
                return json.JSONEncoder.default(self, obj)
        try:
            single_value_sql(
                connection,
                """SELECT ongoing
                FROM crontabber
                WHERE
                    app_name = %s
                FOR UPDATE NOWAIT
                """,
                (key,)
            )
            # If the above single_value_sql() didn't raise a
            # SQLDidNotReturnSingleValue exception, it means
            # there is a row by this app_name.
            # Therefore, the next SQL is an update.
            next_sql = """
                UPDATE crontabber
                SET
                    next_run = %(next_run)s,
                    first_run = %(first_run)s,
                    last_run = %(last_run)s,
                    last_success = %(last_success)s,
                    depends_on = %(depends_on)s,
                    error_count = %(error_count)s,
                    last_error = %(last_error)s,
                    ongoing = %(ongoing)s
                WHERE
                    app_name = %(app_name)s
            """
        except OperationalError as exception:
            # 'could not obtain lock' means another process holds the row
            # lock; translate to the more descriptive exception
            if 'could not obtain lock' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            else:
                raise
        except SQLDidNotReturnSingleValue:
            # the key does not exist, do an insert
            next_sql = """
                INSERT INTO crontabber (
                    app_name,
                    next_run,
                    first_run,
                    last_run,
                    last_success,
                    depends_on,
                    error_count,
                    last_error,
                    ongoing
                ) VALUES (
                    %(app_name)s,
                    %(next_run)s,
                    %(first_run)s,
                    %(last_run)s,
                    %(last_success)s,
                    %(depends_on)s,
                    %(error_count)s,
                    %(last_error)s,
                    %(ongoing)s
                )
            """
        parameters = {
            'app_name': key,
            'next_run': value['next_run'],
            'first_run': value['first_run'],
            'last_run': value['last_run'],
            'last_success': value.get('last_success'),
            'depends_on': value['depends_on'],
            'error_count': value['error_count'],
            'last_error': json.dumps(
                value['last_error'],
                cls=LastErrorEncoder
            ),
            'ongoing': value.get('ongoing'),
        }
        try:
            execute_no_results(
                connection,
                next_sql,
                parameters
            )
        except IntegrityError as exception:
            # See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
            # we know to look for this mentioned in the error message.
            if 'crontabber_unique_app_name_idx' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            raise

    @database_transaction()
    def copy(self, connection):
        """Return the whole table as a {app_name: state_dict} mapping."""
        sql = """SELECT
                app_name,
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error,
                ongoing
            FROM crontabber
        """
        columns = (
            'app_name',
            'next_run', 'first_run', 'last_run', 'last_success',
            'depends_on', 'error_count', 'last_error', 'ongoing'
        )
        all = {}
        for record in execute_query_iter(connection, sql):
            row = dict(zip(columns, record))
            all[row.pop('app_name')] = row
        return all

    def update(self, data):
        """Write every item of `data` (a mapping) into the database."""
        for key in data:
            self[key] = data[key]

    def get(self, key, default=None):
        """return the item by key or return 'default'"""
        try:
            return self[key]
        except KeyError:
            return default

    def pop(self, key, default=_marker):
        """remove the item by key
        If not default is specified, raise KeyError if nothing
        could be removed.
        Return 'default' if specified and nothing could be removed
        """
        try:
            popped = self[key]
            del self[key]
            return popped
        except KeyError:
            # NOTE(review): `default == _marker` relies on equality falling
            # back to identity for the object() sentinel; `is` would be the
            # more precise comparison here.
            if default == _marker:
                raise
            return default

    @database_transaction()
    def __delitem__(self, connection, key):
        """remove the item by key or raise KeyError"""
        try:
            # result intentionally ignored
            single_value_sql(
                connection,
                """SELECT app_name
                FROM crontabber
                WHERE
                    app_name = %s""",
                (key,)
            )
        except SQLDidNotReturnSingleValue:
            raise KeyError(key)
        # item exists
        execute_no_results(
            connection,
            """DELETE FROM crontabber
            WHERE app_name = %s""",
            (key,)
        )
# -----------------------------------------------------------------------------
def _default_list_splitter(class_list_str): # pragma: no cover
return [x.strip() for x in class_list_str.split(',')]
def _default_class_extractor(list_element): # pragma: no cover
return list_element
def _default_extra_extractor(list_element): # pragma: no cover
raise NotImplementedError()
# helpers for parsing the extra '|frequency|time' metadata attached to jobs
def get_extra_as_options(input_str):
    """Parse the '|'-separated job description into a config Namespace.

    A job is described as "path.to.Class|frequency|time".  With a single
    trailing field, a value containing ':' is treated as a daily run time
    (frequency defaults to '1d'); otherwise it is the frequency and the
    time is left unset.  Raises JobDescriptionError when no '|' part is
    present.  The returned Namespace carries 'frequency' and 'time'
    options excluded from printed/dumped configuration.
    """
    if '|' not in input_str:
        raise JobDescriptionError('No frequency and/or time defined')
    parts = input_str.split('|')[1:]
    if len(parts) == 1:
        lone = parts[0]
        if ':' in lone:
            # a clock time such as "03:45" implies a daily job
            frequency, time_ = '1d', lone
        else:
            frequency, time_ = lone, None
    else:
        frequency, time_ = parts

    namespace = Namespace()
    namespace.add_option(
        'frequency',
        doc='frequency',
        default=frequency,
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True
    )
    namespace.add_option(
        'time',
        doc='time',
        default=time_,
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True
    )
    return namespace
def check_time(value):
    """Validate an 'HH:MM' time string such as '03:45' or '1:1'.

    Raises TimeDefinitionError unless the value parses as a time of day
    with 0 <= hours < 24 and 0 <= minutes < 60; returns None otherwise.
    """
    try:
        hours_part, minutes_part = value.split(':')
        hours = int(hours_part)
        minutes = int(minutes_part)
        if not (0 <= hours < 24):
            raise ValueError
        if not (0 <= minutes < 60):
            raise ValueError
    except ValueError:
        raise TimeDefinitionError("Invalid definition of time %r" % value)
def line_splitter(text):
    """Split a jobs listing on newlines, commas and semicolons.

    Whitespace is trimmed from every piece; empty pieces and '#'-prefixed
    comment lines are dropped.
    """
    pieces = (piece.strip() for piece in re.split('\n|,|;', text.strip()))
    return [piece for piece in pieces if piece and not piece.startswith('#')]
def pipe_splitter(text):
    """Return the portion of a job description before the first '|'."""
    head, _sep, _rest = text.partition('|')
    return head
class CronTabberBase(RequiredConfig):
    """Core crontabber application: schedules and runs configured jobs.

    Configuration lives in the `crontabber` namespace (job list, retry
    timing, database classes) plus a set of one-shot command line
    switches (--list-jobs, --nagios, --reset-job, ...) handled in
    `main()`. Decoupled from the runnable `App` base class so other
    projects (e.g. Socorro) can mix it into their own App.
    """
    app_name = 'crontabber'
    app_version = __version__
    app_description = __doc__

    required_config = Namespace()
    # the most important option, 'jobs', is defined last
    required_config.namespace('crontabber')
    required_config.crontabber.add_option(
        name='job_state_db_class',
        default=JobStateDatabase,
        doc='Class to load and save the state and runs',
    )
    # 'jobs' is a multi-line string of `path.Class|frequency[|time]`
    # descriptions, expanded into one config namespace per job
    required_config.crontabber.add_option(
        'jobs',
        default='',
        from_string_converter=classes_in_namespaces_converter_with_compression(
            reference_namespace=Namespace(),
            list_splitter_fn=line_splitter,
            class_extractor=pipe_splitter,
            extra_extractor=get_extra_as_options
        )
    )
    required_config.crontabber.add_option(
        'error_retry_time',
        default=300,
        doc='number of seconds to re-attempt a job that failed'
    )
    required_config.crontabber.add_option(
        'max_ongoing_age_hours',
        default=12.0,
        doc=(
            'If a job has been ongoing for longer than this, it gets '
            'ignored as a lock and the job is run anyway.'
        )
    )
    # for local use, independent of the JSONAndPostgresJobDatabase
    required_config.crontabber.add_option(
        'database_class',
        default='crontabber.connection_factory.ConnectionFactory',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )
    required_config.crontabber.add_option(
        'transaction_executor_class',
        default='crontabber.transaction_executor.TransactionExecutor',
        doc='a class that will execute transactions',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )
    # ---- one-shot command line switches, dispatched in main() ----
    required_config.add_option(
        name='job',
        default='',
        doc='Run a specific job',
        short_form='j',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='list-jobs',
        default=False,
        doc='List all jobs',
        short_form='l',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='force',
        default=False,
        doc='Force running a job despite dependencies',
        short_form='f',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='configtest',
        default=False,
        doc='Check that all configured jobs are OK',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='sentrytest',
        default=False,
        doc='Send a sample raven exception',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='audit-ghosts',
        default=False,
        doc='Checks if there jobs in the database that is not configured.',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='reset-job',
        default='',
        doc='Pretend a job has never been run',
        short_form='r',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='nagios',
        default=False,
        doc='Exits with 0, 1 or 2 with a message on stdout if errors have '
            'happened.',
        short_form='n',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='version',
        default=False,
        doc='Print current version and exit',
        short_form='v',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.namespace('sentry')
    required_config.sentry.add_option(
        'dsn',
        doc='DSN for Sentry via raven',
        default='',
        reference_value_from='secrets.sentry',
    )
def __init__(self, config):
super(CronTabberBase, self).__init__(config)
self.database_connection_factory = \
self.config.crontabber.database_class(config.crontabber)
self.transaction_executor = (
self.config.crontabber.transaction_executor_class(
config.crontabber,
self.database_connection_factory
)
)
    def main(self):
        """Dispatch on command line switches, then run job(s).

        Returns a process exit code:
          0 - success (or informational switch handled)
          1 - configtest/sentrytest failed
          2 - the next job's row was locked by another process
          3 - the next job is already marked ongoing
        """
        if self.config.get('list-jobs'):
            self.list_jobs()
            return 0
        elif self.config.get('nagios'):
            return self.nagios()
        elif self.config.get('version'):
            self.print_version()
            return 0
        elif self.config.get('reset-job'):
            self.reset_job(self.config.get('reset-job'))
            return 0
        elif self.config.get('audit-ghosts'):
            self.audit_ghosts()
            return 0
        elif self.config.get('configtest'):
            # map True -> 0, False -> 1
            return not self.configtest() and 1 or 0
        elif self.config.get('sentrytest'):
            return not self.sentrytest() and 1 or 0
        if self.config.get('job'):
            self.run_one(self.config['job'], self.config.get('force'))
        else:
            try:
                self.run_all()
            except RowLevelLockError:
                self.config.logger.debug(
                    'Next job to work on is already ongoing'
                )
                return 2
            except OngoingJobError:
                self.config.logger.debug(
                    'Next job to work on is already ongoing'
                )
                return 3
        return 0
@staticmethod
def _reorder_class_list(class_list):
# class_list looks something like this:
# [('FooBarJob', <class 'FooBarJob'>),
# ('BarJob', <class 'BarJob'>),
# ('FooJob', <class 'FooJob'>)]
return reorder_dag(
class_list,
depends_getter=lambda x: getattr(x[1], 'depends_on', None),
name_getter=lambda x: x[1].app_name
)
@property
def job_state_database(self):
if not getattr(self, '_job_state_database', None):
self._job_state_database = (
self.config.crontabber.job_state_db_class(
self.config.crontabber
)
)
return self._job_state_database
    def nagios(self, stream=sys.stdout):
        """Write a Nagios-style health line to `stream` and return its code.

        return 0 (OK) if there are no errors in the state.
        return 1 (WARNING) if a backfill app only has 1 error.
        return 2 (CRITICAL) if a backfill app has > 1 error.
        return 2 (CRITICAL) if a non-backfill app has 1 error.
        """
        warnings = []
        criticals = []
        for class_name, job_class in self.config.crontabber.jobs.class_list:
            if job_class.app_name in self.job_state_database:
                info = self.job_state_database.get(job_class.app_name)
                if not info.get('error_count', 0):
                    continue
                error_count = info['error_count']
                # trouble!
                serialized = (
                    '%s (%s) | %s | %s' %
                    (job_class.app_name,
                     class_name,
                     info['last_error']['type'],
                     info['last_error']['value'])
                )
                if (
                    error_count == 1 and
                    hasattr(job_class, "_is_backfill_app")
                ):
                    # backfill apps re-run missed windows, so a single
                    # failure is just a warning for now
                    warnings.append(serialized)
                else:
                    # anything worse than that is critical
                    criticals.append(serialized)
        if criticals:
            stream.write('CRITICAL - ')
            stream.write('; '.join(criticals))
            stream.write('\n')
            return 2
        elif warnings:
            stream.write('WARNING - ')
            stream.write('; '.join(warnings))
            stream.write('\n')
            return 1
        stream.write('OK - All systems nominal')
        stream.write('\n')
        return 0
def print_version(self, stream=sys.stdout):
stream.write('%s\n' % self.app_version)
    def list_jobs(self, stream=None):
        """Print a human readable report of every configured job.

        For each job: class path, app_name, frequency, last/next run
        times relative to now, and the last error traceback if any.
        NOTE: Python 2 print statements with trailing commas are used
        to keep several fragments on one output line.
        """
        if not stream:
            stream = sys.stdout
        _fmt = '%Y-%m-%d %H:%M:%S'
        _now = utc_now()
        PAD = 15  # width of the label column
        for class_name, job_class in self.config.crontabber.jobs.class_list:
            class_config = self.config.crontabber['class-%s' % class_name]
            freq = class_config.frequency
            if class_config.time:
                freq += ' @ %s' % class_config.time
            class_name = job_class.__module__ + '.' + job_class.__name__
            print >>stream, '=== JOB ' + '=' * 72
            print >>stream, 'Class:'.ljust(PAD), class_name
            print >>stream, 'App name:'.ljust(PAD), job_class.app_name
            print >>stream, 'Frequency:'.ljust(PAD), freq
            try:
                info = self.job_state_database[job_class.app_name]
            except KeyError:
                print >>stream, '*NO PREVIOUS RUN INFO*'
                continue
            if info.get('ongoing'):
                print >>stream, 'Ongoing now!'.ljust(PAD),
                print >>stream, 'Started', '%s ago' % timesince(
                    _now, info.get('ongoing')
                )
            print >>stream, 'Last run:'.ljust(PAD),
            if info['last_run']:
                print >>stream, info['last_run'].strftime(_fmt).ljust(20),
                print >>stream, '(%s ago)' % timesince(info['last_run'], _now)
            else:
                print >>stream, 'none'
            print >>stream, 'Last success:'.ljust(PAD),
            if info.get('last_success'):
                print >>stream, info['last_success'].strftime(_fmt).ljust(20),
                print >>stream, ('(%s ago)' %
                                 timesince(info['last_success'], _now))
            else:
                print >>stream, 'no previous successful run'
            print >>stream, 'Next run:'.ljust(PAD),
            if info['next_run']:
                print >>stream, info['next_run'].strftime(_fmt).ljust(20),
                if _now > info['next_run']:
                    print >>stream, ('(was %s ago)' %
                                     timesince(info['next_run'], _now))
                else:
                    print >>stream, '(in %s)' % timesince(
                        _now,
                        info['next_run']
                    )
            else:
                print >>stream, 'none'
            if info.get('last_error'):
                print >>stream, 'Error!!'.ljust(PAD),
                print >>stream, '(%s times)' % info['error_count']
                print >>stream, 'Traceback (most recent call last):'
                print >>stream, info['last_error']['traceback'],
                print >>stream, '%s:' % info['last_error']['type'],
                print >>stream, info['last_error']['value']
            print >>stream, ''
def reset_job(self, description):
"""remove the job from the state.
if means that next time we run, this job will start over from scratch.
"""
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
if job_class.app_name in self.job_state_database:
self.config.logger.info('App reset')
self.job_state_database.pop(job_class.app_name)
else:
self.config.logger.warning('App already reset')
return
raise JobNotFoundError(description)
def run_all(self):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config)
def run_one(self, description, force=False):
# the description in this case is either the app_name or the full
# module/class reference
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config, force=force)
return
raise JobNotFoundError(description)
    def _run_one(self, job_class, config, force=False):
        """Run `job_class` if due and dependencies are met; record outcome.

        Successes and failures are written to the crontabber_log table;
        the job's state row is updated in the `finally` clause unless the
        job couldn't even be started (ongoing / row locked).
        """
        _debug = self.config.logger.debug
        seconds = convert_frequency(config.frequency)
        time_ = config.time
        if not force:
            if not self.time_to_run(job_class, time_):
                _debug("skipping %r because it's not time to run", job_class)
                return
            ok, dependency_error = self.check_dependencies(job_class)
            if not ok:
                _debug(
                    "skipping %r dependencies aren't met [%s]",
                    job_class, dependency_error
                )
                return
        _debug('about to run %r', job_class)
        app_name = job_class.app_name
        info = self.job_state_database.get(app_name)
        last_success = None
        now = utc_now()
        log_run = True
        try:
            t0 = time.time()
            for last_success in self._run_job(job_class, config, info):
                t1 = time.time()
                _debug('successfully ran %r on %s', job_class, last_success)
                self._remember_success(job_class, last_success, t1 - t0)
                # _run_job() returns a generator, so we don't know how
                # many times this will loop. Anyway, we need to reset the
                # 't0' for the next loop if there is one.
                t0 = time.time()
            exc_type = exc_value = exc_tb = None
        except (OngoingJobError, RowLevelLockError):
            # It's not an actual runtime error. It just basically means
            # you can't start crontabber right now.
            log_run = False
            raise
        except:
            t1 = time.time()
            exc_type, exc_value, exc_tb = sys.exc_info()
            # when debugging tests that mock logging, uncomment this otherwise
            # the exc_info=True doesn't compute and record what the exception
            # was
            #raise  # noqa
            if self.config.sentry and self.config.sentry.dsn:
                assert raven, "raven not installed"
                try:
                    client = raven.Client(dsn=self.config.sentry.dsn)
                    identifier = client.get_ident(client.captureException())
                    self.config.logger.info(
                        'Error captured in Sentry. Reference: %s' % identifier
                    )
                except Exception:
                    # Blank exceptions like this is evil but a failure to send
                    # the exception to Sentry is much less important than for
                    # crontabber to carry on. This is especially true
                    # considering that raven depends on network I/O.
                    _debug('Failed to capture and send error to Sentry',
                           exc_info=True)
            _debug('error when running %r on %s',
                   job_class, last_success, exc_info=True)
            self._remember_failure(
                job_class,
                t1 - t0,
                exc_type,
                exc_value,
                exc_tb
            )
        finally:
            if log_run:
                self._log_run(
                    job_class,
                    seconds,
                    time_,
                    last_success,
                    now,
                    exc_type, exc_value, exc_tb
                )
    @database_transaction()
    def _remember_success(
        self,
        connection,
        class_,
        success_date,
        duration,
    ):
        """Insert a success row into crontabber_log for `class_`.

        `connection` is injected by the database_transaction decorator.
        `duration` is seconds, stored with 5 decimal places.
        """
        app_name = class_.app_name
        execute_no_results(
            connection,
            """INSERT INTO crontabber_log (
                app_name,
                success,
                duration
            ) VALUES (
                %s,
                %s,
                %s
            )""",
            (app_name, success_date, '%.5f' % duration),
        )
    @database_transaction()
    def _remember_failure(
        self,
        connection,
        class_,
        duration,
        exc_type,
        exc_value,
        exc_tb,
    ):
        """Insert a failure row (with formatted traceback) into crontabber_log.

        `connection` is injected by the database_transaction decorator.
        The exception triple comes from sys.exc_info().
        """
        exc_traceback = ''.join(traceback.format_tb(exc_tb))
        app_name = class_.app_name
        execute_no_results(
            connection,
            """INSERT INTO crontabber_log (
                app_name,
                duration,
                exc_type,
                exc_value,
                exc_traceback
            ) VALUES (
                %s,
                %s,
                %s,
                %s,
                %s
            )""",
            (
                app_name,
                '%.5f' % duration,
                repr(exc_type),
                repr(exc_value),
                exc_traceback
            ),
        )
def check_dependencies(self, class_):
try:
depends_on = class_.depends_on
except AttributeError:
# that's perfectly fine
return True, None
if isinstance(depends_on, basestring):
depends_on = [depends_on]
for dependency in depends_on:
try:
job_info = self.job_state_database[dependency]
except KeyError:
# the job this one depends on hasn't been run yet!
return False, "%r hasn't been run yet" % dependency
if job_info.get('last_error'):
# errored last time it ran
return False, "%r errored last time it ran" % dependency
if job_info['next_run'] < utc_now():
# the dependency hasn't recently run
return False, "%r hasn't recently run" % dependency
# no reason not to stop this class
return True, None
    def time_to_run(self, class_, time_):
        """return true if it's time to run the job.

        This is true if there is no previous information about its last run
        or if the last time it ran and set its next_run to a date that is now
        past.
        """
        app_name = class_.app_name
        try:
            info = self.job_state_database[app_name]
        except KeyError:
            # never run before
            if time_:
                h, m = [int(x) for x in time_.split(':')]
                # only run if this hour and minute is < now
                now = utc_now()
                if now.hour > h:
                    return True
                elif now.hour == h and now.minute >= m:
                    return True
                return False
            else:
                # no past information, run now
                return True
        next_run = info['next_run']
        if not next_run:
            # It has never run before.
            # If it has an active ongoing status it means two
            # independent threads tried to start it. The second one
            # (by a tiny time margin) will have a job_class whose
            # `ongoing` value has already been set.
            # If that's the case, let it through because it will
            # commence and break due to RowLevelLockError in the
            # state's __setitem__ method.
            return bool(info['ongoing'])
        if next_run < utc_now():
            return True
        return False
def _run_job(self, class_, config, info):
# here we go!
instance = class_(config, info)
self._set_ongoing_job(class_)
result = instance.main()
return result
def _set_ongoing_job(self, class_):
app_name = class_.app_name
info = self.job_state_database.get(app_name)
if info:
# Was it already ongoing?
if info.get('ongoing'):
# Unless it's been ongoing for ages, raise OngoingJobError
age_hours = (utc_now() - info['ongoing']).seconds / 3600.0
if age_hours < self.config.crontabber.max_ongoing_age_hours:
raise OngoingJobError(info['ongoing'])
else:
self.config.logger.debug(
'{} has been ongoing for {:2} hours. '
'Ignore it and running the app anyway.'.format(
app_name,
age_hours,
)
)
info['ongoing'] = utc_now()
else:
depends_on = getattr(class_, 'depends_on', [])
if isinstance(depends_on, basestring):
depends_on = [depends_on]
elif not isinstance(depends_on, list):
depends_on = list(depends_on)
info = {
'next_run': None,
'first_run': None,
'last_run': None,
'last_success': None,
'last_error': {},
'error_count': 0,
'depends_on': depends_on,
'ongoing': utc_now(),
}
self.job_state_database[app_name] = info
    def _log_run(self, class_, seconds, time_, last_success, now,
                 exc_type, exc_value, exc_tb):
        """Record the outcome of a run of `class_` in the state database.

        On failure, next_run is pushed only `error_retry_time` seconds
        out; on success it is advanced by the job's frequency (pinned
        to the HH:MM in `time_` when given). Also updates the error
        bookkeeping and clears the `ongoing` marker.
        """
        assert inspect.isclass(class_)
        app_name = class_.app_name
        info = self.job_state_database.get(app_name, {})
        depends_on = getattr(class_, 'depends_on', [])
        if isinstance(depends_on, basestring):
            depends_on = [depends_on]
        elif not isinstance(depends_on, list):
            depends_on = list(depends_on)
        info['depends_on'] = depends_on
        if not info.get('first_run'):
            info['first_run'] = now
        info['last_run'] = now
        if last_success:
            info['last_success'] = last_success
        if exc_type:
            # it errored, try very soon again
            info['next_run'] = now + datetime.timedelta(
                seconds=self.config.crontabber.error_retry_time
            )
        else:
            info['next_run'] = now + datetime.timedelta(seconds=seconds)
            if time_:
                h, m = [int(x) for x in time_.split(':')]
                info['next_run'] = info['next_run'].replace(hour=h,
                                                            minute=m,
                                                            second=0,
                                                            microsecond=0)
        if exc_type:
            tb = ''.join(traceback.format_tb(exc_tb))
            info['last_error'] = {
                'type': exc_type,
                'value': str(exc_value),
                'traceback': tb,
            }
            info['error_count'] = info.get('error_count', 0) + 1
        else:
            info['last_error'] = {}
            info['error_count'] = 0
        # Clearly it's not "ongoing" any more when it's here, because
        # being here means the job has finished.
        info['ongoing'] = None
        self.job_state_database[app_name] = info
def configtest(self):
"""return true if all configured jobs are configured OK"""
# similar to run_all() but don't actually run them
failed = 0
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, __ in class_list:
class_config = self.config.crontabber['class-%s' % class_name]
if not self._configtest_one(class_config):
failed += 1
return not failed
def _configtest_one(self, config):
try:
seconds = convert_frequency(config.frequency)
time_ = config.time
if time_:
check_time(time_)
# if less than 1 day, it doesn't make sense to specify hour
if seconds < 60 * 60 * 24:
raise FrequencyDefinitionError(config.time)
return True
except (JobNotFoundError,
JobDescriptionError,
FrequencyDefinitionError,
TimeDefinitionError):
config.logger.critical(
'Failed to config test a job',
exc_info=True
)
return False
    def sentrytest(self):
        """return true if we managed to send a sample raven exception"""
        if not (self.config.sentry and self.config.sentry.dsn):
            raise SentryConfigurationError('sentry dsn not configured')
        try:
            version = raven.fetch_package_version('crontabber')
        except Exception:
            # best effort only; an unknown version shouldn't stop the test
            version = None
            self.config.logger.warning(
                'Unable to extract version of crontabber',
                exc_info=True
            )
        client = raven.Client(
            dsn=self.config.sentry.dsn,
            release=version
        )
        identifier = client.captureMessage(
            'Sentry test sent from crontabber'
        )
        self.config.logger.info(
            'Sentry successful identifier: %s', identifier
        )
        return True
    def audit_ghosts(self):
        """compare the list of configured jobs with the jobs in the state

        Prints (to stdout, Python 2 print statements) any app_name that
        exists in the state database but is no longer configured.
        """
        print_header = True
        for app_name in self._get_ghosts():
            if print_header:
                print_header = False
                print (
                    "Found the following in the state database but not "
                    "available as a configured job:"
                )
            print "\t%s" % (app_name,)
def _get_ghosts(self):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
configured_app_names = []
for __, job_class in class_list:
configured_app_names.append(job_class.app_name)
state_app_names = self.job_state_database.keys()
return set(state_app_names) - set(configured_app_names)
class CronTabber(CronTabberBase, App):
    """This class mixes in the CronTabberBase class with the default runnable
    application infrastructure: crontabber.generic_app.App. Having the
    CronTabberBase decoupled from the App class allows CronTabber to integrate
    seamlessly into a different system for setting up and running an app.

    One of the primary clients of CronTabber is Socorro. In fact CronTabber
    was spun off from Socorro as an independent app. Initially they had
    identical copies of the App base class. To allow the two projects to
    evolve independently, the CronTabber App class was separated from the
    CronTabberBase class. This allows Socorro to declare its own CronTabberApp
    that derives from the Socorro App class instead of the
    crontabber.generic_app.App class"""

    # no new methods are required, the two base classes have everything
def local_main():  # pragma: no cover
    """Entry point for running crontabber from a source checkout.

    Prepends the repository root to sys.path so the `crontabber`
    package is importable, then exits with the app's return code.
    """
    import sys
    import os
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    if root not in sys.path:
        sys.path.append(root)
    sys.exit(main(CronTabber))
    # try:
    #     sys.exit(main(CronTabber))
    # except RowLevelLockError:
    #     sys.exit(1)
    # except OngoingJobError:
    #     sys.exit(2)


if __name__ == '__main__':  # pragma: no cover
    local_main()
|
mozilla/crontabber | crontabber/app.py | check_time | python | def check_time(value):
try:
h, m = value.split(':')
h = int(h)
m = int(m)
if h >= 24 or h < 0:
raise ValueError
if m >= 60 or m < 0:
raise ValueError
except ValueError:
raise TimeDefinitionError("Invalid definition of time %r" % value) | check that it's a value like 03:45 or 1:1 | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L640-L651 | null | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
CronTabber is a configman app for executing cron jobs.
"""
import re
import datetime
import inspect
import json
import sys
import time
import traceback
from functools import partial
from psycopg2 import OperationalError, IntegrityError
from dbapi2_util import (
single_value_sql,
SQLDidNotReturnSingleValue,
execute_query_iter,
execute_query_fetchall,
single_row_sql,
SQLDidNotReturnSingleRow,
execute_no_results,
)
from generic_app import App, main
from datetimeutil import utc_now, timesince
from base import (
convert_frequency,
FrequencyDefinitionError,
reorder_dag
)
try:
import raven
except ImportError: # pragma: no cover
raven = None
from configman import Namespace, RequiredConfig
from configman.converters import class_converter, CannotConvertError
from crontabber import __version__
CREATE_CRONTABBER_SQL = """
CREATE TABLE crontabber (
app_name text NOT NULL,
next_run timestamp with time zone,
first_run timestamp with time zone,
last_run timestamp with time zone,
last_success timestamp with time zone,
ongoing timestamp with time zone,
error_count integer DEFAULT 0,
depends_on text[],
last_error json
);
"""
CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX = """
CREATE UNIQUE INDEX crontabber_unique_app_name_idx
ON crontabber (app_name);
"""
CREATE_CRONTABBER_LOG_SQL = """
CREATE TABLE crontabber_log (
id SERIAL NOT NULL,
app_name text NOT NULL,
log_time timestamp with time zone DEFAULT now() NOT NULL,
duration interval,
success timestamp with time zone,
exc_type text,
exc_value text,
exc_traceback text
);
"""
# a method decorator that indicates that the method defines a single transacton
# on a database connection. It invokes the method using the instance's
# transaction object, automatically passing in the appropriate database
# connection. Any abnormal exit from the method will result in a 'rollback'
# any normal exit will result in a 'commit'
def database_transaction(transaction_object_name='transaction_executor'):
    """Method decorator: run the method as a single database transaction.

    The decorated method is invoked through the instance attribute named
    by `transaction_object_name`, which supplies the database connection
    as the method's first positional argument after `self`. Any abnormal
    exit from the method results in a rollback; a normal exit commits.
    """
    def transaction_decorator(method):
        def _do_transaction(self, *args, **kwargs):
            executor = getattr(self, transaction_object_name)
            return executor(partial(method, self), *args, **kwargs)
        return _do_transaction
    return transaction_decorator
class JobNotFoundError(Exception):
    """No configured job matches the given description."""
    pass


class TimeDefinitionError(Exception):
    """A job's time definition is not a valid HH:MM string."""
    pass


class JobDescriptionError(Exception):
    """A `path.Class|frequency[|time]` job description is malformed."""
    pass


class BrokenJSONError(ValueError):
    """Stored job state contained JSON that could not be parsed."""
    pass


class SentryConfigurationError(Exception):
    """When Sentry isn't configured correctly"""


class RowLevelLockError(OperationalError):
    """The reason for defining this exception is that when you attempt
    to read from a row that is actively locked (by another
    thread/process) is that you get an OperationalError which isn't
    particular developer-friendly because it might look like there's
    some other more fundamental error such as a bad network connection
    or something wrong with the credentials.

    By giving it a name, it's more clear in the crontabber_log what
    exactly was the reason why that second thread/process couldn't
    work on that row a simultaneously.
    """
    pass


class OngoingJobError(Exception):
    """Raised when you basically tried to run a job that already
    ongoing. This is the "high level" version of `RowLevelLockError`.
    """
    pass


# unique sentinel for pop()'s "no default given" case; checked by identity
_marker = object()
class JobStateDatabase(RequiredConfig):
    """Dict-like interface to the `crontabber` state table in Postgres.

    Keys are job app_names; values are dicts with the run bookkeeping
    columns (next_run, first_run, last_run, last_success, depends_on,
    error_count, last_error and -- where supported -- ongoing).
    """
    required_config = Namespace()
    required_config.add_option(
        'database_class',
        default='crontabber.connection_factory.ConnectionFactory',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )
    required_config.add_option(
        'transaction_executor_class',
        default='crontabber.transaction_executor.TransactionExecutor',
        doc='a class that will execute transactions',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )
    def __init__(self, config=None):
        """Connect to the database and create/migrate the crontabber tables.

        Creates `crontabber` and `crontabber_log` if they don't exist,
        adds the `ongoing` column to pre-existing installs, and ensures
        the unique index on app_name is present.
        """
        self.config = config
        self.database_connection_factory = config.database_class(config)
        self.transaction_executor = self.config.transaction_executor_class(
            self.config,
            self.database_connection_factory
        )
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_SQL
            )
        else:
            # Check that it has the new `ongoing` column.
            try:
                self.transaction_executor(
                    single_value_sql,
                    "SELECT column_name FROM information_schema.columns "
                    "WHERE table_name='crontabber' AND column_name='ongoing'"
                )
            except SQLDidNotReturnSingleValue:
                # So that's why then!
                # We have to do a quick migration.
                self.config.logger.info(
                    "Have to do a migration and add the `ongoing` field"
                )
                self.transaction_executor(
                    execute_no_results,
                    "ALTER TABLE crontabber ADD ongoing TIMESTAMP "
                    "WITH TIME ZONE"
                )
        # check that we have set the unique index on the app_name
        index_count, = self.transaction_executor(
            single_row_sql,
            "SELECT COUNT(1) FROM pg_indexes WHERE "
            "indexname = 'crontabber_unique_app_name_idx'"
        )
        if not index_count:
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX
            )
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber_log'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber_log"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_LOG_SQL
            )
def has_data(self):
return bool(self.transaction_executor(
single_value_sql,
"SELECT COUNT(*) FROM crontabber"
))
def __iter__(self):
return iter([
record[0] for record in
self.transaction_executor(
execute_query_fetchall,
"SELECT app_name FROM crontabber"
)
])
def __contains__(self, key):
"""return True if we have a job by this key"""
try:
self.transaction_executor(
single_value_sql,
"""SELECT app_name
FROM crontabber
WHERE
app_name = %s""",
(key,)
)
return True
except SQLDidNotReturnSingleValue:
return False
def keys(self):
"""return a list of all app_names"""
keys = []
for app_name, __ in self.items():
keys.append(app_name)
return keys
    def items(self):
        """return all the app_names and their values as tuples"""
        sql = """
            SELECT
                app_name,
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error
            FROM crontabber"""
        # column names in the same order as the SELECT above
        columns = (
            'app_name',
            'next_run', 'first_run', 'last_run', 'last_success',
            'depends_on', 'error_count', 'last_error'
        )
        items = []
        for record in self.transaction_executor(execute_query_fetchall, sql):
            row = dict(zip(columns, record))
            # pop app_name so the value dict holds only the state fields
            items.append((row.pop('app_name'), row))
        return items
def values(self):
"""return a list of all state values"""
values = []
for __, data in self.items():
values.append(data)
return values
    def __getitem__(self, key):
        """return the job info dict for `key` or raise a KeyError"""
        sql = """
            SELECT
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error,
                ongoing
            FROM crontabber
            WHERE
                app_name = %s"""
        # column names in the same order as the SELECT above
        columns = (
            'next_run', 'first_run', 'last_run', 'last_success',
            'depends_on', 'error_count', 'last_error', 'ongoing'
        )
        try:
            record = self.transaction_executor(single_row_sql, sql, (key,))
        except SQLDidNotReturnSingleRow:
            # mirror dict semantics for a missing key
            raise KeyError(key)
        row = dict(zip(columns, record))
        return row
    @database_transaction()
    def __setitem__(self, connection, key, value):
        """Insert or update the state row for `key` (an app_name).

        Takes a row-level lock (FOR UPDATE NOWAIT) on an existing row;
        raises RowLevelLockError when another crontabber process holds
        the lock or wins a concurrent insert race. `connection` is
        injected by the database_transaction decorator.
        """
        class LastErrorEncoder(json.JSONEncoder):
            # exception *classes* stored in last_error serialize as repr()
            def default(self, obj):
                if isinstance(obj, type):
                    return repr(obj)
                return json.JSONEncoder.default(self, obj)
        try:
            single_value_sql(
                connection,
                """SELECT ongoing
                FROM crontabber
                WHERE
                    app_name = %s
                FOR UPDATE NOWAIT
                """,
                (key,)
            )
            # If the above single_value_sql() didn't raise a
            # SQLDidNotReturnSingleValue exception, it means
            # there is a row by this app_name.
            # Therefore, the next SQL is an update.
            next_sql = """
                UPDATE crontabber
                SET
                    next_run = %(next_run)s,
                    first_run = %(first_run)s,
                    last_run = %(last_run)s,
                    last_success = %(last_success)s,
                    depends_on = %(depends_on)s,
                    error_count = %(error_count)s,
                    last_error = %(last_error)s,
                    ongoing = %(ongoing)s
                WHERE
                    app_name = %(app_name)s
            """
        except OperationalError as exception:
            if 'could not obtain lock' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            else:
                raise
        except SQLDidNotReturnSingleValue:
            # the key does not exist, do an insert
            next_sql = """
                INSERT INTO crontabber (
                    app_name,
                    next_run,
                    first_run,
                    last_run,
                    last_success,
                    depends_on,
                    error_count,
                    last_error,
                    ongoing
                ) VALUES (
                    %(app_name)s,
                    %(next_run)s,
                    %(first_run)s,
                    %(last_run)s,
                    %(last_success)s,
                    %(depends_on)s,
                    %(error_count)s,
                    %(last_error)s,
                    %(ongoing)s
                )
            """
        parameters = {
            'app_name': key,
            'next_run': value['next_run'],
            'first_run': value['first_run'],
            'last_run': value['last_run'],
            'last_success': value.get('last_success'),
            'depends_on': value['depends_on'],
            'error_count': value['error_count'],
            'last_error': json.dumps(
                value['last_error'],
                cls=LastErrorEncoder
            ),
            'ongoing': value.get('ongoing'),
        }
        try:
            execute_no_results(
                connection,
                next_sql,
                parameters
            )
        except IntegrityError as exception:
            # See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
            # we know to look for this mentioned in the error message.
            if 'crontabber_unique_app_name_idx' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            raise
@database_transaction()
def copy(self, connection):
sql = """SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
all = {}
for record in execute_query_iter(connection, sql):
row = dict(zip(columns, record))
all[row.pop('app_name')] = row
return all
def update(self, data):
for key in data:
self[key] = data[key]
def get(self, key, default=None):
"""return the item by key or return 'default'"""
try:
return self[key]
except KeyError:
return default
def pop(self, key, default=_marker):
"""remove the item by key
If not default is specified, raise KeyError if nothing
could be removed.
Return 'default' if specified and nothing could be removed
"""
try:
popped = self[key]
del self[key]
return popped
except KeyError:
if default == _marker:
raise
return default
    @database_transaction()
    def __delitem__(self, connection, key):
        """Remove the row for `key` (an app_name) or raise KeyError.

        `connection` is injected by the `database_transaction` decorator;
        callers simply do `del state[key]`.
        """
        try:
            # result intentionally ignored -- we only probe for existence
            single_value_sql(
                connection,
                """SELECT app_name
                FROM crontabber
                WHERE
                    app_name = %s""",
                (key,)
            )
        except SQLDidNotReturnSingleValue:
            # no such row; mirror dict semantics
            raise KeyError(key)
        # item exists
        execute_no_results(
            connection,
            """DELETE FROM crontabber
            WHERE app_name = %s""",
            (key,)
        )
# -----------------------------------------------------------------------------
def _default_list_splitter(class_list_str): # pragma: no cover
return [x.strip() for x in class_list_str.split(',')]
def _default_class_extractor(list_element): # pragma: no cover
return list_element
def _default_extra_extractor(list_element): # pragma: no cover
raise NotImplementedError()
def classes_in_namespaces_converter_with_compression(
        reference_namespace=None,
        template_for_namespace="class-%(name)s",
        list_splitter_fn=_default_list_splitter,
        class_extractor=_default_class_extractor,
        extra_extractor=_default_extra_extractor):
    """Build a configman from_string_converter for a list of job classes.

    parameters:
        reference_namespace - options already defined at a higher level;
                              they are NOT duplicated into each class's
                              namespace.  Defaults to an empty mapping.
        template_for_namespace - a template for the names of the namespaces
                                 that will contain the classes and their
                                 associated required config options.  There
                                 are two template variables available:
                                 %(name)s - the name of the class to be
                                 contained in the namespace; %(index)d -
                                 the sequential index number of the
                                 namespace.
        list_splitter_fn - a function that will take the string list of
                           classes and break it up into a sequence of
                           individual elements
        class_extractor - a function that will return the string version of
                          a classname from the result of the splitter
        extra_extractor - a function that will return a Namespace of options
                          created from any extra information associated with
                          the classes returned by the splitter function
    """
    # a None sentinel replaces the old mutable ``{}`` default so no dict
    # instance is ever shared between calls
    if reference_namespace is None:
        reference_namespace = {}

    # -------------------------------------------------------------------------
    def class_list_converter(class_list_str):
        """This function becomes the actual converter used by configman to
        take a string and convert it into the nested sequence of Namespaces,
        one for each class in the list. It does this by creating a proxy
        class stuffed with its own 'required_config' that's dynamically
        generated."""
        if isinstance(class_list_str, basestring):
            class_str_list = list_splitter_fn(class_list_str)
        else:
            raise TypeError('must be derivative of a basestring')

        # =====================================================================
        class InnerClassList(RequiredConfig):
            """This nested class is a proxy list for the classes. It collects
            all the config requirements for the listed classes and places them
            each into their own Namespace.
            """
            # we're dynamically creating a class here. The following block of
            # code is actually adding class level attributes to this new class
            # 1st requirement for configman
            required_config = Namespace()
            # to help the programmer know what Namespaces we added
            subordinate_namespace_names = []
            # save the template for future reference
            namespace_template = template_for_namespace
            # for display
            original_input = class_list_str.replace('\n', '\\n')
            # for each class in the class list
            class_list = []
            for namespace_index, class_list_element in enumerate(
                class_str_list
            ):
                try:
                    a_class = class_converter(
                        class_extractor(class_list_element)
                    )
                except CannotConvertError:
                    raise JobNotFoundError(class_list_element)
                class_list.append((a_class.__name__, a_class))
                # figure out the Namespace name
                namespace_name_dict = {
                    'name': a_class.__name__,
                    'index': namespace_index
                }
                namespace_name = template_for_namespace % namespace_name_dict
                subordinate_namespace_names.append(namespace_name)
                # create the new Namespace
                required_config.namespace(namespace_name)
                a_class_namespace = required_config[namespace_name]
                # add options for the 'extra data'
                try:
                    extra_options = extra_extractor(class_list_element)
                    a_class_namespace.update(extra_options)
                except NotImplementedError:
                    pass
                # add options for the class's required config, skipping any
                # option already present in the reference namespace
                try:
                    for k, v in a_class.get_required_config().iteritems():
                        if k not in reference_namespace:
                            a_class_namespace[k] = v
                except AttributeError:  # a_class has no get_required_config
                    pass

            @classmethod
            def to_str(cls):
                """this method takes this inner class object and turns it back
                into the original string of classnames. This is used
                primarily as for the output of the 'help' option"""
                return cls.original_input

        return InnerClassList  # result of class_list_converter
    return class_list_converter  # result of classes_in_namespaces_converter
def get_extra_as_options(input_str):
    """Parse the '|frequency|time' suffix of a job description.

    Returns a Namespace with two options, 'frequency' and 'time'.
    Raises JobDescriptionError when no '|' suffix is present.
    """
    if '|' not in input_str:
        raise JobDescriptionError('No frequency and/or time defined')
    metadata = input_str.split('|')[1:]
    if len(metadata) == 1:
        only_field = metadata[0]
        if ':' in only_field:
            # a bare HH:MM means "daily at that time"
            frequency = '1d'
            time_ = only_field
        else:
            frequency = only_field
            time_ = None
    else:
        frequency, time_ = metadata
    n = Namespace()
    n.add_option(
        'frequency',
        doc='frequency',
        default=frequency,
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True
    )
    n.add_option(
        'time',
        doc='time',
        default=time_,
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True
    )
    return n
def line_splitter(text):
    """Split *text* on newlines, commas and semicolons, dropping blank
    entries and '#' comment lines."""
    stripped = (piece.strip() for piece in re.split('\n|,|;', text.strip()))
    return [token for token in stripped
            if token and not token.startswith('#')]
def pipe_splitter(text):
    """Return the part of *text* before the first '|' (all of it when
    there is no '|')."""
    head, _, _ = text.partition('|')
    return head
class CronTabberBase(RequiredConfig):
    """Core crontabber application.

    Reads the job list from configman, decides which jobs are due, runs
    them and records state and results through a JobStateDatabase.
    """
    app_name = 'crontabber'
    app_version = __version__
    app_description = __doc__

    required_config = Namespace()
    # the most important option, 'jobs', is defined last
    required_config.namespace('crontabber')
    required_config.crontabber.add_option(
        name='job_state_db_class',
        default=JobStateDatabase,
        doc='Class to load and save the state and runs',
    )
    # 'jobs' is a newline/comma/semicolon separated list of
    # 'module.Class|frequency|time' descriptions, converted into one
    # configman namespace per job class
    required_config.crontabber.add_option(
        'jobs',
        default='',
        from_string_converter=classes_in_namespaces_converter_with_compression(
            reference_namespace=Namespace(),
            list_splitter_fn=line_splitter,
            class_extractor=pipe_splitter,
            extra_extractor=get_extra_as_options
        )
    )
    required_config.crontabber.add_option(
        'error_retry_time',
        default=300,
        doc='number of seconds to re-attempt a job that failed'
    )
    required_config.crontabber.add_option(
        'max_ongoing_age_hours',
        default=12.0,
        doc=(
            'If a job has been ongoing for longer than this, it gets '
            'ignored as a lock and the job is run anyway.'
        )
    )
    # for local use, independent of the JSONAndPostgresJobDatabase
    required_config.crontabber.add_option(
        'database_class',
        default='crontabber.connection_factory.ConnectionFactory',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )
    required_config.crontabber.add_option(
        'transaction_executor_class',
        default='crontabber.transaction_executor.TransactionExecutor',
        doc='a class that will execute transactions',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql'
    )
    # one-shot command line switches, dispatched in main()
    required_config.add_option(
        name='job',
        default='',
        doc='Run a specific job',
        short_form='j',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='list-jobs',
        default=False,
        doc='List all jobs',
        short_form='l',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='force',
        default=False,
        doc='Force running a job despite dependencies',
        short_form='f',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='configtest',
        default=False,
        doc='Check that all configured jobs are OK',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='sentrytest',
        default=False,
        doc='Send a sample raven exception',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='audit-ghosts',
        default=False,
        doc='Checks if there jobs in the database that is not configured.',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='reset-job',
        default='',
        doc='Pretend a job has never been run',
        short_form='r',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='nagios',
        default=False,
        doc='Exits with 0, 1 or 2 with a message on stdout if errors have '
            'happened.',
        short_form='n',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.add_option(
        name='version',
        default=False,
        doc='Print current version and exit',
        short_form='v',
        exclude_from_print_conf=True,
        exclude_from_dump_conf=True,
    )
    required_config.namespace('sentry')
    required_config.sentry.add_option(
        'dsn',
        doc='DSN for Sentry via raven',
        default='',
        reference_value_from='secrets.sentry',
    )
    def __init__(self, config):
        """Create the database connection factory and the transaction
        executor from the crontabber sub-namespace of *config*."""
        super(CronTabberBase, self).__init__(config)
        self.database_connection_factory = \
            self.config.crontabber.database_class(config.crontabber)
        self.transaction_executor = (
            self.config.crontabber.transaction_executor_class(
                config.crontabber,
                self.database_connection_factory
            )
        )
    def main(self):
        """Dispatch on the one-shot command line switches; otherwise run
        one job (--job) or the whole schedule.

        Returns a process exit code: 0 ok, 1 failed config/sentry test,
        2/3 when the next job is locked by another running crontabber.
        """
        if self.config.get('list-jobs'):
            self.list_jobs()
            return 0
        elif self.config.get('nagios'):
            return self.nagios()
        elif self.config.get('version'):
            self.print_version()
            return 0
        elif self.config.get('reset-job'):
            self.reset_job(self.config.get('reset-job'))
            return 0
        elif self.config.get('audit-ghosts'):
            self.audit_ghosts()
            return 0
        elif self.config.get('configtest'):
            return not self.configtest() and 1 or 0
        elif self.config.get('sentrytest'):
            return not self.sentrytest() and 1 or 0
        if self.config.get('job'):
            self.run_one(self.config['job'], self.config.get('force'))
        else:
            try:
                self.run_all()
            except RowLevelLockError:
                self.config.logger.debug(
                    'Next job to work on is already ongoing'
                )
                # distinct exit codes let wrappers tell lock contention
                # apart from real failure
                return 2
            except OngoingJobError:
                self.config.logger.debug(
                    'Next job to work on is already ongoing'
                )
                return 3
        return 0
@staticmethod
def _reorder_class_list(class_list):
# class_list looks something like this:
# [('FooBarJob', <class 'FooBarJob'>),
# ('BarJob', <class 'BarJob'>),
# ('FooJob', <class 'FooJob'>)]
return reorder_dag(
class_list,
depends_getter=lambda x: getattr(x[1], 'depends_on', None),
name_getter=lambda x: x[1].app_name
)
@property
def job_state_database(self):
if not getattr(self, '_job_state_database', None):
self._job_state_database = (
self.config.crontabber.job_state_db_class(
self.config.crontabber
)
)
return self._job_state_database
    def nagios(self, stream=sys.stdout):
        """
        return 0 (OK) if there are no errors in the state.
        return 1 (WARNING) if a backfill app only has 1 error.
        return 2 (CRITICAL) if a backfill app has > 1 error.
        return 2 (CRITICAL) if a non-backfill app has 1 error.
        """
        warnings = []
        criticals = []
        for class_name, job_class in self.config.crontabber.jobs.class_list:
            if job_class.app_name in self.job_state_database:
                info = self.job_state_database.get(job_class.app_name)
                if not info.get('error_count', 0):
                    continue
                error_count = info['error_count']
                # trouble!  one line per failing app, nagios-style
                serialized = (
                    '%s (%s) | %s | %s' %
                    (job_class.app_name,
                     class_name,
                     info['last_error']['type'],
                     info['last_error']['value'])
                )
                if (
                    error_count == 1 and
                    hasattr(job_class, "_is_backfill_app")
                ):
                    # just a warning for now
                    warnings.append(serialized)
                else:
                    # anything worse than that is critical
                    criticals.append(serialized)
        if criticals:
            stream.write('CRITICAL - ')
            stream.write('; '.join(criticals))
            stream.write('\n')
            return 2
        elif warnings:
            stream.write('WARNING - ')
            stream.write('; '.join(warnings))
            stream.write('\n')
            return 1
        stream.write('OK - All systems nominal')
        stream.write('\n')
        return 0
def print_version(self, stream=sys.stdout):
stream.write('%s\n' % self.app_version)
    def list_jobs(self, stream=None):
        """Pretty-print every configured job and its recorded run state
        (last/next run, last success, errors) to *stream* (default stdout)."""
        if not stream:
            stream = sys.stdout
        _fmt = '%Y-%m-%d %H:%M:%S'
        _now = utc_now()
        PAD = 15  # label column width
        for class_name, job_class in self.config.crontabber.jobs.class_list:
            class_config = self.config.crontabber['class-%s' % class_name]
            freq = class_config.frequency
            if class_config.time:
                freq += ' @ %s' % class_config.time
            class_name = job_class.__module__ + '.' + job_class.__name__
            print >>stream, '=== JOB ' + '=' * 72
            print >>stream, 'Class:'.ljust(PAD), class_name
            print >>stream, 'App name:'.ljust(PAD), job_class.app_name
            print >>stream, 'Frequency:'.ljust(PAD), freq
            try:
                info = self.job_state_database[job_class.app_name]
            except KeyError:
                print >>stream, '*NO PREVIOUS RUN INFO*'
                continue
            if info.get('ongoing'):
                print >>stream, 'Ongoing now!'.ljust(PAD),
                print >>stream, 'Started', '%s ago' % timesince(
                    _now, info.get('ongoing')
                )
            print >>stream, 'Last run:'.ljust(PAD),
            if info['last_run']:
                print >>stream, info['last_run'].strftime(_fmt).ljust(20),
                print >>stream, '(%s ago)' % timesince(info['last_run'], _now)
            else:
                print >>stream, 'none'
            print >>stream, 'Last success:'.ljust(PAD),
            if info.get('last_success'):
                print >>stream, info['last_success'].strftime(_fmt).ljust(20),
                print >>stream, ('(%s ago)' %
                                 timesince(info['last_success'], _now))
            else:
                print >>stream, 'no previous successful run'
            print >>stream, 'Next run:'.ljust(PAD),
            if info['next_run']:
                print >>stream, info['next_run'].strftime(_fmt).ljust(20),
                if _now > info['next_run']:
                    print >>stream, ('(was %s ago)' %
                                     timesince(info['next_run'], _now))
                else:
                    print >>stream, '(in %s)' % timesince(
                        _now,
                        info['next_run']
                    )
            else:
                print >>stream, 'none'
            if info.get('last_error'):
                print >>stream, 'Error!!'.ljust(PAD),
                print >>stream, '(%s times)' % info['error_count']
                print >>stream, 'Traceback (most recent call last):'
                print >>stream, info['last_error']['traceback'],
                print >>stream, '%s:' % info['last_error']['type'],
                print >>stream, info['last_error']['value']
            print >>stream, ''
def reset_job(self, description):
"""remove the job from the state.
if means that next time we run, this job will start over from scratch.
"""
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
if job_class.app_name in self.job_state_database:
self.config.logger.info('App reset')
self.job_state_database.pop(job_class.app_name)
else:
self.config.logger.warning('App already reset')
return
raise JobNotFoundError(description)
def run_all(self):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config)
def run_one(self, description, force=False):
# the description in this case is either the app_name or the full
# module/class reference
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config, force=force)
return
raise JobNotFoundError(description)
    def _run_one(self, job_class, config, force=False):
        """Run a single job if it's due and its dependencies are met,
        recording every success/failure and scheduling the next run.

        With force=True the time and dependency checks are skipped.
        OngoingJobError/RowLevelLockError propagate without being logged
        as a run.
        """
        _debug = self.config.logger.debug
        seconds = convert_frequency(config.frequency)
        time_ = config.time
        if not force:
            if not self.time_to_run(job_class, time_):
                _debug("skipping %r because it's not time to run", job_class)
                return
            ok, dependency_error = self.check_dependencies(job_class)
            if not ok:
                _debug(
                    "skipping %r dependencies aren't met [%s]",
                    job_class, dependency_error
                )
                return
        _debug('about to run %r', job_class)
        app_name = job_class.app_name
        info = self.job_state_database.get(app_name)
        last_success = None
        now = utc_now()
        log_run = True
        try:
            t0 = time.time()
            # _run_job() yields one timestamp per (backfill) iteration
            for last_success in self._run_job(job_class, config, info):
                t1 = time.time()
                _debug('successfully ran %r on %s', job_class, last_success)
                self._remember_success(job_class, last_success, t1 - t0)
                # _run_job() returns a generator, so we don't know how
                # many times this will loop. Anyway, we need to reset the
                # 't0' for the next loop if there is one.
                t0 = time.time()
            exc_type = exc_value = exc_tb = None
        except (OngoingJobError, RowLevelLockError):
            # It's not an actual runtime error. It just basically means
            # you can't start crontabber right now.
            log_run = False
            raise
        except:
            t1 = time.time()
            exc_type, exc_value, exc_tb = sys.exc_info()
            # when debugging tests that mock logging, uncomment this otherwise
            # the exc_info=True doesn't compute and record what the exception
            # was
            #raise  # noqa
            if self.config.sentry and self.config.sentry.dsn:
                assert raven, "raven not installed"
                try:
                    client = raven.Client(dsn=self.config.sentry.dsn)
                    identifier = client.get_ident(client.captureException())
                    self.config.logger.info(
                        'Error captured in Sentry. Reference: %s' % identifier
                    )
                except Exception:
                    # Blank exceptions like this is evil but a failure to send
                    # the exception to Sentry is much less important than for
                    # crontabber to carry on. This is especially true
                    # considering that raven depends on network I/O.
                    _debug('Failed to capture and send error to Sentry',
                           exc_info=True)
            _debug('error when running %r on %s',
                   job_class, last_success, exc_info=True)
            self._remember_failure(
                job_class,
                t1 - t0,
                exc_type,
                exc_value,
                exc_tb
            )
        finally:
            if log_run:
                self._log_run(
                    job_class,
                    seconds,
                    time_,
                    last_success,
                    now,
                    exc_type, exc_value, exc_tb
                )
    @database_transaction()
    def _remember_success(
        self,
        connection,
        class_,
        success_date,
        duration,
    ):
        """Append a success row (app_name, timestamp, duration in seconds)
        to the crontabber_log table."""
        app_name = class_.app_name
        execute_no_results(
            connection,
            """INSERT INTO crontabber_log (
                app_name,
                success,
                duration
            ) VALUES (
                %s,
                %s,
                %s
            )""",
            (app_name, success_date, '%.5f' % duration),
        )
    @database_transaction()
    def _remember_failure(
        self,
        connection,
        class_,
        duration,
        exc_type,
        exc_value,
        exc_tb,
    ):
        """Append a failure row (duration, exception type/value and the
        formatted traceback) to the crontabber_log table."""
        exc_traceback = ''.join(traceback.format_tb(exc_tb))
        app_name = class_.app_name
        execute_no_results(
            connection,
            """INSERT INTO crontabber_log (
                app_name,
                duration,
                exc_type,
                exc_value,
                exc_traceback
            ) VALUES (
                %s,
                %s,
                %s,
                %s,
                %s
            )""",
            (
                app_name,
                '%.5f' % duration,
                repr(exc_type),
                repr(exc_value),
                exc_traceback
            ),
        )
def check_dependencies(self, class_):
try:
depends_on = class_.depends_on
except AttributeError:
# that's perfectly fine
return True, None
if isinstance(depends_on, basestring):
depends_on = [depends_on]
for dependency in depends_on:
try:
job_info = self.job_state_database[dependency]
except KeyError:
# the job this one depends on hasn't been run yet!
return False, "%r hasn't been run yet" % dependency
if job_info.get('last_error'):
# errored last time it ran
return False, "%r errored last time it ran" % dependency
if job_info['next_run'] < utc_now():
# the dependency hasn't recently run
return False, "%r hasn't recently run" % dependency
# no reason not to stop this class
return True, None
    def time_to_run(self, class_, time_):
        """return true if it's time to run the job.

        This is true if there is no previous information about its last run
        or if the last time it ran and set its next_run to a date that is now
        past.  *time_* is an optional 'HH:MM' at-time for never-run jobs.
        """
        app_name = class_.app_name
        try:
            info = self.job_state_database[app_name]
        except KeyError:
            if time_:
                h, m = [int(x) for x in time_.split(':')]
                # only run if this hour and minute is < now
                now = utc_now()
                if now.hour > h:
                    return True
                elif now.hour == h and now.minute >= m:
                    return True
                return False
            else:
                # no past information, run now
                return True
        next_run = info['next_run']
        if not next_run:
            # It has never run before.
            # If it has an active ongoing status it means two
            # independent threads tried to start it. The second one
            # (by a tiny time margin) will have a job_class whose
            # `ongoing` value has already been set.
            # If that's the case, let it through because it will
            # commence and break due to RowLevelLockError in the
            # state's __setitem__ method.
            return bool(info['ongoing'])
        if next_run < utc_now():
            return True
        return False
def _run_job(self, class_, config, info):
# here we go!
instance = class_(config, info)
self._set_ongoing_job(class_)
result = instance.main()
return result
def _set_ongoing_job(self, class_):
app_name = class_.app_name
info = self.job_state_database.get(app_name)
if info:
# Was it already ongoing?
if info.get('ongoing'):
# Unless it's been ongoing for ages, raise OngoingJobError
age_hours = (utc_now() - info['ongoing']).seconds / 3600.0
if age_hours < self.config.crontabber.max_ongoing_age_hours:
raise OngoingJobError(info['ongoing'])
else:
self.config.logger.debug(
'{} has been ongoing for {:2} hours. '
'Ignore it and running the app anyway.'.format(
app_name,
age_hours,
)
)
info['ongoing'] = utc_now()
else:
depends_on = getattr(class_, 'depends_on', [])
if isinstance(depends_on, basestring):
depends_on = [depends_on]
elif not isinstance(depends_on, list):
depends_on = list(depends_on)
info = {
'next_run': None,
'first_run': None,
'last_run': None,
'last_success': None,
'last_error': {},
'error_count': 0,
'depends_on': depends_on,
'ongoing': utc_now(),
}
self.job_state_database[app_name] = info
    def _log_run(self, class_, seconds, time_, last_success, now,
                 exc_type, exc_value, exc_tb):
        """Update the job's state row after a run: first/last run stamps,
        next_run scheduling (quick retry on error, frequency + optional
        at-time on success) and error bookkeeping."""
        assert inspect.isclass(class_)
        app_name = class_.app_name
        info = self.job_state_database.get(app_name, {})
        depends_on = getattr(class_, 'depends_on', [])
        if isinstance(depends_on, basestring):
            depends_on = [depends_on]
        elif not isinstance(depends_on, list):
            depends_on = list(depends_on)
        info['depends_on'] = depends_on
        if not info.get('first_run'):
            info['first_run'] = now
        info['last_run'] = now
        if last_success:
            info['last_success'] = last_success
        if exc_type:
            # it errored, try very soon again
            info['next_run'] = now + datetime.timedelta(
                seconds=self.config.crontabber.error_retry_time
            )
        else:
            info['next_run'] = now + datetime.timedelta(seconds=seconds)
            if time_:
                # pin the next run to the configured HH:MM
                h, m = [int(x) for x in time_.split(':')]
                info['next_run'] = info['next_run'].replace(hour=h,
                                                            minute=m,
                                                            second=0,
                                                            microsecond=0)
        if exc_type:
            tb = ''.join(traceback.format_tb(exc_tb))
            info['last_error'] = {
                'type': exc_type,
                'value': str(exc_value),
                'traceback': tb,
            }
            info['error_count'] = info.get('error_count', 0) + 1
        else:
            info['last_error'] = {}
            info['error_count'] = 0
        # Clearly it's not "ongoing" any more when it's here, because
        # being here means the job has finished.
        info['ongoing'] = None
        self.job_state_database[app_name] = info
def configtest(self):
"""return true if all configured jobs are configured OK"""
# similar to run_all() but don't actually run them
failed = 0
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, __ in class_list:
class_config = self.config.crontabber['class-%s' % class_name]
if not self._configtest_one(class_config):
failed += 1
return not failed
    def _configtest_one(self, config):
        """Validate one job's frequency/time configuration; log critically
        and return False on any definition error, True otherwise."""
        try:
            seconds = convert_frequency(config.frequency)
            time_ = config.time
            if time_:
                check_time(time_)
                # if less than 1 day, it doesn't make sense to specify hour
                if seconds < 60 * 60 * 24:
                    raise FrequencyDefinitionError(config.time)
            return True
        except (JobNotFoundError,
                JobDescriptionError,
                FrequencyDefinitionError,
                TimeDefinitionError):
            config.logger.critical(
                'Failed to config test a job',
                exc_info=True
            )
            return False
    def sentrytest(self):
        """return true if we managed to send a sample raven exception"""
        if not (self.config.sentry and self.config.sentry.dsn):
            raise SentryConfigurationError('sentry dsn not configured')
        try:
            version = raven.fetch_package_version('crontabber')
        except Exception:
            # best effort only; the test message is still useful unversioned
            version = None
            self.config.logger.warning(
                'Unable to extract version of crontabber',
                exc_info=True
            )
        client = raven.Client(
            dsn=self.config.sentry.dsn,
            release=version
        )
        identifier = client.captureMessage(
            'Sentry test sent from crontabber'
        )
        self.config.logger.info(
            'Sentry successful identifier: %s', identifier
        )
        return True
    def audit_ghosts(self):
        """compare the list of configured jobs with the jobs in the state,
        printing any app_name that exists only in the state database"""
        print_header = True
        for app_name in self._get_ghosts():
            if print_header:
                print_header = False
                print (
                    "Found the following in the state database but not "
                    "available as a configured job:"
                )
            print "\t%s" % (app_name,)
def _get_ghosts(self):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
configured_app_names = []
for __, job_class in class_list:
configured_app_names.append(job_class.app_name)
state_app_names = self.job_state_database.keys()
return set(state_app_names) - set(configured_app_names)
class CronTabber(CronTabberBase, App):
    """This class mixes in the CronTabberBase class with the default runnable
    application infrastructure: crontabber.generic_app.App. Having the
    CronTabberBase decoupled from the App class allows CrontTabber to integrate
    seemlessly into a different system for setting up and running an app.

    One of the primary clients of CronTabber is Socorro. In fact CronTabber
    was spun off from Socorro as an indepentent app. Initially they had
    identical copies of the App base class. To allow the two projects to
    evolve indepentenly, the CronTabber App class was separated from the
    CronTabberBase class. This allows Socorro to declare its own CronTabberApp
    that derives from the Socorro App class instead of the
    crontabber.generic_app.App class"""
    # no new methods are required, the two base classes have everything
def local_main():  # pragma: no cover
    import sys
    import os
    # make sure the project root is importable when run from a checkout
    project_root = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..')
    )
    if project_root not in sys.path:
        sys.path.append(project_root)
    sys.exit(main(CronTabber))
    # An alternative that would map lock contention onto exit codes:
    # try:
    #     sys.exit(main(CronTabber))
    # except RowLevelLockError:
    #     sys.exit(1)
    # except OngoingJobError:
    #     sys.exit(2)


if __name__ == '__main__':  # pragma: no cover
    local_main()
|
mozilla/crontabber | crontabber/app.py | JobStateDatabase.keys | python | def keys(self):
keys = []
for app_name, __ in self.items():
keys.append(app_name)
return keys | return a list of all app_names | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L257-L262 | null | class JobStateDatabase(RequiredConfig):
required_config = Namespace()
required_config.add_option(
'database_class',
default='crontabber.connection_factory.ConnectionFactory',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.add_option(
'transaction_executor_class',
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
    def __init__(self, config=None):
        """Connect to the database and create or migrate the crontabber
        and crontabber_log tables if they don't exist yet."""
        self.config = config
        self.database_connection_factory = config.database_class(config)
        self.transaction_executor = self.config.transaction_executor_class(
            self.config,
            self.database_connection_factory
        )
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_SQL
            )
        else:
            # Check that it has the new `ongoing` column.
            try:
                self.transaction_executor(
                    single_value_sql,
                    "SELECT column_name FROM information_schema.columns "
                    "WHERE table_name='crontabber' AND column_name='ongoing'"
                )
            except SQLDidNotReturnSingleValue:
                # So that's why then!
                # We have to do a quick migration.
                self.config.logger.info(
                    "Have to do a migration and add the `ongoing` field"
                )
                self.transaction_executor(
                    execute_no_results,
                    "ALTER TABLE crontabber ADD ongoing TIMESTAMP "
                    "WITH TIME ZONE"
                )
            # check that we have set the unique index on the app_name
            index_count, = self.transaction_executor(
                single_row_sql,
                "SELECT COUNT(1) FROM pg_indexes WHERE "
                "indexname = 'crontabber_unique_app_name_idx'"
            )
            if not index_count:
                self.transaction_executor(
                    execute_no_results,
                    CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX
                )
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber_log'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber_log"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_LOG_SQL
            )
def has_data(self):
return bool(self.transaction_executor(
single_value_sql,
"SELECT COUNT(*) FROM crontabber"
))
def __iter__(self):
return iter([
record[0] for record in
self.transaction_executor(
execute_query_fetchall,
"SELECT app_name FROM crontabber"
)
])
def __contains__(self, key):
"""return True if we have a job by this key"""
try:
self.transaction_executor(
single_value_sql,
"""SELECT app_name
FROM crontabber
WHERE
app_name = %s""",
(key,)
)
return True
except SQLDidNotReturnSingleValue:
return False
def items(self):
"""return all the app_names and their values as tuples"""
sql = """
SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error
FROM crontabber"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error'
)
items = []
for record in self.transaction_executor(execute_query_fetchall, sql):
row = dict(zip(columns, record))
items.append((row.pop('app_name'), row))
return items
def values(self):
"""return a list of all state values"""
values = []
for __, data in self.items():
values.append(data)
return values
def __getitem__(self, key):
"""return the job info or raise a KeyError"""
sql = """
SELECT
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
WHERE
app_name = %s"""
columns = (
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
try:
record = self.transaction_executor(single_row_sql, sql, (key,))
except SQLDidNotReturnSingleRow:
raise KeyError(key)
row = dict(zip(columns, record))
return row
    @database_transaction()
    def __setitem__(self, connection, key, value):
        """Insert or update the state row for app_name *key*.

        Locks the existing row with SELECT ... FOR UPDATE NOWAIT so two
        concurrent crontabbers can't update the same job; a held lock
        surfaces as RowLevelLockError, as does a duplicate-insert race
        caught by the unique app_name index.
        """
        class LastErrorEncoder(json.JSONEncoder):
            # exception *classes* aren't JSON serializable; store repr()
            def default(self, obj):
                if isinstance(obj, type):
                    return repr(obj)
                return json.JSONEncoder.default(self, obj)
        try:
            single_value_sql(
                connection,
                """SELECT ongoing
                FROM crontabber
                WHERE
                    app_name = %s
                FOR UPDATE NOWAIT
                """,
                (key,)
            )
            # If the above single_value_sql() didn't raise a
            # SQLDidNotReturnSingleValue exception, it means
            # there is a row by this app_name.
            # Therefore, the next SQL is an update.
            next_sql = """
                UPDATE crontabber
                SET
                    next_run = %(next_run)s,
                    first_run = %(first_run)s,
                    last_run = %(last_run)s,
                    last_success = %(last_success)s,
                    depends_on = %(depends_on)s,
                    error_count = %(error_count)s,
                    last_error = %(last_error)s,
                    ongoing = %(ongoing)s
                WHERE
                    app_name = %(app_name)s
            """
        except OperationalError as exception:
            if 'could not obtain lock' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            else:
                raise
        except SQLDidNotReturnSingleValue:
            # the key does not exist, do an insert
            next_sql = """
                INSERT INTO crontabber (
                    app_name,
                    next_run,
                    first_run,
                    last_run,
                    last_success,
                    depends_on,
                    error_count,
                    last_error,
                    ongoing
                ) VALUES (
                    %(app_name)s,
                    %(next_run)s,
                    %(first_run)s,
                    %(last_run)s,
                    %(last_success)s,
                    %(depends_on)s,
                    %(error_count)s,
                    %(last_error)s,
                    %(ongoing)s
                )
            """
        parameters = {
            'app_name': key,
            'next_run': value['next_run'],
            'first_run': value['first_run'],
            'last_run': value['last_run'],
            'last_success': value.get('last_success'),
            'depends_on': value['depends_on'],
            'error_count': value['error_count'],
            'last_error': json.dumps(
                value['last_error'],
                cls=LastErrorEncoder
            ),
            'ongoing': value.get('ongoing'),
        }
        try:
            execute_no_results(
                connection,
                next_sql,
                parameters
            )
        except IntegrityError as exception:
            # See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
            # we know to look for this mentioned in the error message.
            if 'crontabber_unique_app_name_idx' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            raise
@database_transaction()
def copy(self, connection):
sql = """SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
all = {}
for record in execute_query_iter(connection, sql):
row = dict(zip(columns, record))
all[row.pop('app_name')] = row
return all
def update(self, data):
for key in data:
self[key] = data[key]
def get(self, key, default=None):
"""return the item by key or return 'default'"""
try:
return self[key]
except KeyError:
return default
def pop(self, key, default=_marker):
"""remove the item by key
If not default is specified, raise KeyError if nothing
could be removed.
Return 'default' if specified and nothing could be removed
"""
try:
popped = self[key]
del self[key]
return popped
except KeyError:
if default == _marker:
raise
return default
    @database_transaction()
    def __delitem__(self, connection, key):
        """remove the item by key or raise KeyError"""
        # Probe for the row first so a missing key surfaces as KeyError
        # instead of a silent no-op DELETE.
        try:
            # result intentionally ignored
            single_value_sql(
                connection,
                """SELECT app_name
                FROM crontabber
                WHERE
                    app_name = %s""",
                (key,)
            )
        except SQLDidNotReturnSingleValue:
            raise KeyError(key)
        # item exists
        execute_no_results(
            connection,
            """DELETE FROM crontabber
            WHERE app_name = %s""",
            (key,)
        )
|
mozilla/crontabber | crontabber/app.py | JobStateDatabase.items | python | def items(self):
sql = """
SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error
FROM crontabber"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error'
)
items = []
for record in self.transaction_executor(execute_query_fetchall, sql):
row = dict(zip(columns, record))
items.append((row.pop('app_name'), row))
return items | return all the app_names and their values as tuples | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L264-L286 | null | class JobStateDatabase(RequiredConfig):
required_config = Namespace()
required_config.add_option(
'database_class',
default='crontabber.connection_factory.ConnectionFactory',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.add_option(
'transaction_executor_class',
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
    def __init__(self, config=None):
        """Set up DB plumbing and make sure the crontabber schema exists.

        Creates the ``crontabber`` and ``crontabber_log`` tables on
        first run, adds the ``ongoing`` column if an older table is
        found, and ensures the unique index on ``app_name``.
        """
        # NOTE(review): despite the default, ``config`` is required in
        # practice -- it is dereferenced on the very next lines.
        self.config = config
        self.database_connection_factory = config.database_class(config)
        self.transaction_executor = self.config.transaction_executor_class(
            self.config,
            self.database_connection_factory
        )
        # Bootstrap: does the main state table exist yet?
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_SQL
            )
        else:
            # Check that it has the new `ongoing` column.
            try:
                self.transaction_executor(
                    single_value_sql,
                    "SELECT column_name FROM information_schema.columns "
                    "WHERE table_name='crontabber' AND column_name='ongoing'"
                )
            except SQLDidNotReturnSingleValue:
                # Pre-`ongoing` schema detected:
                # we have to do a quick migration.
                self.config.logger.info(
                    "Have to do a migration and add the `ongoing` field"
                )
                self.transaction_executor(
                    execute_no_results,
                    "ALTER TABLE crontabber ADD ongoing TIMESTAMP "
                    "WITH TIME ZONE"
                )
        # check that we have set the unique index on the app_name
        index_count, = self.transaction_executor(
            single_row_sql,
            "SELECT COUNT(1) FROM pg_indexes WHERE "
            "indexname = 'crontabber_unique_app_name_idx'"
        )
        if not index_count:
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX
            )
        # Bootstrap the run-log table as well.
        found = self.transaction_executor(
            execute_query_fetchall,
            "SELECT relname FROM pg_class "
            "WHERE relname = 'crontabber_log'"
        )
        if not found:
            self.config.logger.info(
                "Creating crontabber table: crontabber_log"
            )
            self.transaction_executor(
                execute_no_results,
                CREATE_CRONTABBER_LOG_SQL
            )
def has_data(self):
return bool(self.transaction_executor(
single_value_sql,
"SELECT COUNT(*) FROM crontabber"
))
def __iter__(self):
return iter([
record[0] for record in
self.transaction_executor(
execute_query_fetchall,
"SELECT app_name FROM crontabber"
)
])
    def __contains__(self, key):
        """return True if we have a job by this key"""
        try:
            # single_value_sql raises when the SELECT matches no row,
            # so reaching the `return True` proves the app_name exists
            self.transaction_executor(
                single_value_sql,
                """SELECT app_name
                FROM crontabber
                WHERE
                    app_name = %s""",
                (key,)
            )
            return True
        except SQLDidNotReturnSingleValue:
            return False
def keys(self):
"""return a list of all app_names"""
keys = []
for app_name, __ in self.items():
keys.append(app_name)
return keys
def values(self):
"""return a list of all state values"""
values = []
for __, data in self.items():
values.append(data)
return values
    def __getitem__(self, key):
        """return the job info or raise a KeyError

        The result is a dict keyed by the column names below; *key* is
        the job's app_name.
        """
        sql = """
            SELECT
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error,
                ongoing
            FROM crontabber
            WHERE
                app_name = %s"""
        columns = (
            'next_run', 'first_run', 'last_run', 'last_success',
            'depends_on', 'error_count', 'last_error', 'ongoing'
        )
        try:
            record = self.transaction_executor(single_row_sql, sql, (key,))
        except SQLDidNotReturnSingleRow:
            # mapping protocol: a missing row becomes a KeyError
            raise KeyError(key)
        # zip the positional row into a column-name keyed dict
        row = dict(zip(columns, record))
        return row
    @database_transaction()
    def __setitem__(self, connection, key, value):
        """Insert or update the state row for app_name *key*.

        *value* is a dict of state columns.  The decorator supplies
        *connection* and wraps everything in one transaction.  Raises
        RowLevelLockError when another crontabber holds the row lock or
        raced us on the insert.
        """
        class LastErrorEncoder(json.JSONEncoder):
            # last_error may contain exception *classes*, which are not
            # JSON-serializable; fall back to their repr()
            def default(self, obj):
                if isinstance(obj, type):
                    return repr(obj)
                return json.JSONEncoder.default(self, obj)
        try:
            # FOR UPDATE NOWAIT: fail immediately instead of blocking
            # if another process has this row locked
            single_value_sql(
                connection,
                """SELECT ongoing
                FROM crontabber
                WHERE
                    app_name = %s
                FOR UPDATE NOWAIT
                """,
                (key,)
            )
            # If the above single_value_sql() didn't raise a
            # SQLDidNotReturnSingleValue exception, it means
            # there is a row by this app_name.
            # Therefore, the next SQL is an update.
            next_sql = """
                UPDATE crontabber
                SET
                    next_run = %(next_run)s,
                    first_run = %(first_run)s,
                    last_run = %(last_run)s,
                    last_success = %(last_success)s,
                    depends_on = %(depends_on)s,
                    error_count = %(error_count)s,
                    last_error = %(last_error)s,
                    ongoing = %(ongoing)s
                WHERE
                    app_name = %(app_name)s
            """
        except OperationalError as exception:
            # the NOWAIT lock failure surfaces as an OperationalError
            if 'could not obtain lock' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            else:
                raise
        except SQLDidNotReturnSingleValue:
            # the key does not exist, do an insert
            next_sql = """
                INSERT INTO crontabber (
                    app_name,
                    next_run,
                    first_run,
                    last_run,
                    last_success,
                    depends_on,
                    error_count,
                    last_error,
                    ongoing
                ) VALUES (
                    %(app_name)s,
                    %(next_run)s,
                    %(first_run)s,
                    %(last_run)s,
                    %(last_success)s,
                    %(depends_on)s,
                    %(error_count)s,
                    %(last_error)s,
                    %(ongoing)s
                )
            """
        parameters = {
            'app_name': key,
            'next_run': value['next_run'],
            'first_run': value['first_run'],
            'last_run': value['last_run'],
            'last_success': value.get('last_success'),
            'depends_on': value['depends_on'],
            'error_count': value['error_count'],
            'last_error': json.dumps(
                value['last_error'],
                cls=LastErrorEncoder
            ),
            'ongoing': value.get('ongoing'),
        }
        try:
            execute_no_results(
                connection,
                next_sql,
                parameters
            )
        except IntegrityError as exception:
            # See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
            # we know to look for this mentioned in the error message.
            if 'crontabber_unique_app_name_idx' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            raise
@database_transaction()
def copy(self, connection):
sql = """SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
all = {}
for record in execute_query_iter(connection, sql):
row = dict(zip(columns, record))
all[row.pop('app_name')] = row
return all
def update(self, data):
for key in data:
self[key] = data[key]
def get(self, key, default=None):
"""return the item by key or return 'default'"""
try:
return self[key]
except KeyError:
return default
def pop(self, key, default=_marker):
"""remove the item by key
If not default is specified, raise KeyError if nothing
could be removed.
Return 'default' if specified and nothing could be removed
"""
try:
popped = self[key]
del self[key]
return popped
except KeyError:
if default == _marker:
raise
return default
    @database_transaction()
    def __delitem__(self, connection, key):
        """remove the item by key or raise KeyError

        *connection* is injected by the decorator; the existence check
        and the DELETE run inside a single database transaction.
        """
        try:
            # result intentionally ignored -- we only care whether the
            # SELECT matches a row (it raises when it does not)
            single_value_sql(
                connection,
                """SELECT app_name
                FROM crontabber
                WHERE
                    app_name = %s""",
                (key,)
            )
        except SQLDidNotReturnSingleValue:
            # mapping protocol: a missing row becomes a KeyError
            raise KeyError(key)
        # item exists
        execute_no_results(
            connection,
            """DELETE FROM crontabber
            WHERE app_name = %s""",
            (key,)
        )
|
mozilla/crontabber | crontabber/app.py | JobStateDatabase.values | python | def values(self):
values = []
for __, data in self.items():
values.append(data)
return values | return a list of all state values | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L288-L293 | null | class JobStateDatabase(RequiredConfig):
required_config = Namespace()
required_config.add_option(
'database_class',
default='crontabber.connection_factory.ConnectionFactory',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.add_option(
'transaction_executor_class',
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
def __init__(self, config=None):
self.config = config
self.database_connection_factory = config.database_class(config)
self.transaction_executor = self.config.transaction_executor_class(
self.config,
self.database_connection_factory
)
found = self.transaction_executor(
execute_query_fetchall,
"SELECT relname FROM pg_class "
"WHERE relname = 'crontabber'"
)
if not found:
self.config.logger.info(
"Creating crontabber table: crontabber"
)
self.transaction_executor(
execute_no_results,
CREATE_CRONTABBER_SQL
)
else:
# Check that it has the new `ongoing` column.
try:
self.transaction_executor(
single_value_sql,
"SELECT column_name FROM information_schema.columns "
"WHERE table_name='crontabber' AND column_name='ongoing'"
)
except SQLDidNotReturnSingleValue:
# So that's why then!
# We have to do a quick migration.
self.config.logger.info(
"Have to do a migration and add the `ongoing` field"
)
self.transaction_executor(
execute_no_results,
"ALTER TABLE crontabber ADD ongoing TIMESTAMP "
"WITH TIME ZONE"
)
# check that we have set the unique index on the app_name
index_count, = self.transaction_executor(
single_row_sql,
"SELECT COUNT(1) FROM pg_indexes WHERE "
"indexname = 'crontabber_unique_app_name_idx'"
)
if not index_count:
self.transaction_executor(
execute_no_results,
CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX
)
found = self.transaction_executor(
execute_query_fetchall,
"SELECT relname FROM pg_class "
"WHERE relname = 'crontabber_log'"
)
if not found:
self.config.logger.info(
"Creating crontabber table: crontabber_log"
)
self.transaction_executor(
execute_no_results,
CREATE_CRONTABBER_LOG_SQL
)
def has_data(self):
return bool(self.transaction_executor(
single_value_sql,
"SELECT COUNT(*) FROM crontabber"
))
def __iter__(self):
return iter([
record[0] for record in
self.transaction_executor(
execute_query_fetchall,
"SELECT app_name FROM crontabber"
)
])
def __contains__(self, key):
"""return True if we have a job by this key"""
try:
self.transaction_executor(
single_value_sql,
"""SELECT app_name
FROM crontabber
WHERE
app_name = %s""",
(key,)
)
return True
except SQLDidNotReturnSingleValue:
return False
def keys(self):
"""return a list of all app_names"""
keys = []
for app_name, __ in self.items():
keys.append(app_name)
return keys
def items(self):
"""return all the app_names and their values as tuples"""
sql = """
SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error
FROM crontabber"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error'
)
items = []
for record in self.transaction_executor(execute_query_fetchall, sql):
row = dict(zip(columns, record))
items.append((row.pop('app_name'), row))
return items
def __getitem__(self, key):
"""return the job info or raise a KeyError"""
sql = """
SELECT
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
WHERE
app_name = %s"""
columns = (
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
try:
record = self.transaction_executor(single_row_sql, sql, (key,))
except SQLDidNotReturnSingleRow:
raise KeyError(key)
row = dict(zip(columns, record))
return row
@database_transaction()
def __setitem__(self, connection, key, value):
class LastErrorEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, type):
return repr(obj)
return json.JSONEncoder.default(self, obj)
try:
single_value_sql(
connection,
"""SELECT ongoing
FROM crontabber
WHERE
app_name = %s
FOR UPDATE NOWAIT
""",
(key,)
)
# If the above single_value_sql() didn't raise a
# SQLDidNotReturnSingleValue exception, it means
# there is a row by this app_name.
# Therefore, the next SQL is an update.
next_sql = """
UPDATE crontabber
SET
next_run = %(next_run)s,
first_run = %(first_run)s,
last_run = %(last_run)s,
last_success = %(last_success)s,
depends_on = %(depends_on)s,
error_count = %(error_count)s,
last_error = %(last_error)s,
ongoing = %(ongoing)s
WHERE
app_name = %(app_name)s
"""
except OperationalError as exception:
if 'could not obtain lock' in exception.args[0]:
raise RowLevelLockError(exception.args[0])
else:
raise
except SQLDidNotReturnSingleValue:
# the key does not exist, do an insert
next_sql = """
INSERT INTO crontabber (
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
) VALUES (
%(app_name)s,
%(next_run)s,
%(first_run)s,
%(last_run)s,
%(last_success)s,
%(depends_on)s,
%(error_count)s,
%(last_error)s,
%(ongoing)s
)
"""
parameters = {
'app_name': key,
'next_run': value['next_run'],
'first_run': value['first_run'],
'last_run': value['last_run'],
'last_success': value.get('last_success'),
'depends_on': value['depends_on'],
'error_count': value['error_count'],
'last_error': json.dumps(
value['last_error'],
cls=LastErrorEncoder
),
'ongoing': value.get('ongoing'),
}
try:
execute_no_results(
connection,
next_sql,
parameters
)
except IntegrityError as exception:
# See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
# we know to look for this mentioned in the error message.
if 'crontabber_unique_app_name_idx' in exception.args[0]:
raise RowLevelLockError(exception.args[0])
raise
@database_transaction()
def copy(self, connection):
sql = """SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
all = {}
for record in execute_query_iter(connection, sql):
row = dict(zip(columns, record))
all[row.pop('app_name')] = row
return all
def update(self, data):
for key in data:
self[key] = data[key]
def get(self, key, default=None):
"""return the item by key or return 'default'"""
try:
return self[key]
except KeyError:
return default
def pop(self, key, default=_marker):
"""remove the item by key
If not default is specified, raise KeyError if nothing
could be removed.
Return 'default' if specified and nothing could be removed
"""
try:
popped = self[key]
del self[key]
return popped
except KeyError:
if default == _marker:
raise
return default
@database_transaction()
def __delitem__(self, connection, key):
"""remove the item by key or raise KeyError"""
try:
# result intentionally ignored
single_value_sql(
connection,
"""SELECT app_name
FROM crontabber
WHERE
app_name = %s""",
(key,)
)
except SQLDidNotReturnSingleValue:
raise KeyError(key)
# item exists
execute_no_results(
connection,
"""DELETE FROM crontabber
WHERE app_name = %s""",
(key,)
)
|
mozilla/crontabber | crontabber/app.py | JobStateDatabase.pop | python | def pop(self, key, default=_marker):
try:
popped = self[key]
del self[key]
return popped
except KeyError:
if default == _marker:
raise
return default | remove the item by key
If no default is specified, raise KeyError if nothing
could be removed.
Return 'default' if specified and nothing could be removed | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L451-L464 | null | class JobStateDatabase(RequiredConfig):
required_config = Namespace()
required_config.add_option(
'database_class',
default='crontabber.connection_factory.ConnectionFactory',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.add_option(
'transaction_executor_class',
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
def __init__(self, config=None):
self.config = config
self.database_connection_factory = config.database_class(config)
self.transaction_executor = self.config.transaction_executor_class(
self.config,
self.database_connection_factory
)
found = self.transaction_executor(
execute_query_fetchall,
"SELECT relname FROM pg_class "
"WHERE relname = 'crontabber'"
)
if not found:
self.config.logger.info(
"Creating crontabber table: crontabber"
)
self.transaction_executor(
execute_no_results,
CREATE_CRONTABBER_SQL
)
else:
# Check that it has the new `ongoing` column.
try:
self.transaction_executor(
single_value_sql,
"SELECT column_name FROM information_schema.columns "
"WHERE table_name='crontabber' AND column_name='ongoing'"
)
except SQLDidNotReturnSingleValue:
# So that's why then!
# We have to do a quick migration.
self.config.logger.info(
"Have to do a migration and add the `ongoing` field"
)
self.transaction_executor(
execute_no_results,
"ALTER TABLE crontabber ADD ongoing TIMESTAMP "
"WITH TIME ZONE"
)
# check that we have set the unique index on the app_name
index_count, = self.transaction_executor(
single_row_sql,
"SELECT COUNT(1) FROM pg_indexes WHERE "
"indexname = 'crontabber_unique_app_name_idx'"
)
if not index_count:
self.transaction_executor(
execute_no_results,
CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX
)
found = self.transaction_executor(
execute_query_fetchall,
"SELECT relname FROM pg_class "
"WHERE relname = 'crontabber_log'"
)
if not found:
self.config.logger.info(
"Creating crontabber table: crontabber_log"
)
self.transaction_executor(
execute_no_results,
CREATE_CRONTABBER_LOG_SQL
)
def has_data(self):
return bool(self.transaction_executor(
single_value_sql,
"SELECT COUNT(*) FROM crontabber"
))
def __iter__(self):
return iter([
record[0] for record in
self.transaction_executor(
execute_query_fetchall,
"SELECT app_name FROM crontabber"
)
])
def __contains__(self, key):
"""return True if we have a job by this key"""
try:
self.transaction_executor(
single_value_sql,
"""SELECT app_name
FROM crontabber
WHERE
app_name = %s""",
(key,)
)
return True
except SQLDidNotReturnSingleValue:
return False
def keys(self):
"""return a list of all app_names"""
keys = []
for app_name, __ in self.items():
keys.append(app_name)
return keys
def items(self):
"""return all the app_names and their values as tuples"""
sql = """
SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error
FROM crontabber"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error'
)
items = []
for record in self.transaction_executor(execute_query_fetchall, sql):
row = dict(zip(columns, record))
items.append((row.pop('app_name'), row))
return items
def values(self):
"""return a list of all state values"""
values = []
for __, data in self.items():
values.append(data)
return values
def __getitem__(self, key):
"""return the job info or raise a KeyError"""
sql = """
SELECT
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
WHERE
app_name = %s"""
columns = (
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
try:
record = self.transaction_executor(single_row_sql, sql, (key,))
except SQLDidNotReturnSingleRow:
raise KeyError(key)
row = dict(zip(columns, record))
return row
@database_transaction()
def __setitem__(self, connection, key, value):
class LastErrorEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, type):
return repr(obj)
return json.JSONEncoder.default(self, obj)
try:
single_value_sql(
connection,
"""SELECT ongoing
FROM crontabber
WHERE
app_name = %s
FOR UPDATE NOWAIT
""",
(key,)
)
# If the above single_value_sql() didn't raise a
# SQLDidNotReturnSingleValue exception, it means
# there is a row by this app_name.
# Therefore, the next SQL is an update.
next_sql = """
UPDATE crontabber
SET
next_run = %(next_run)s,
first_run = %(first_run)s,
last_run = %(last_run)s,
last_success = %(last_success)s,
depends_on = %(depends_on)s,
error_count = %(error_count)s,
last_error = %(last_error)s,
ongoing = %(ongoing)s
WHERE
app_name = %(app_name)s
"""
except OperationalError as exception:
if 'could not obtain lock' in exception.args[0]:
raise RowLevelLockError(exception.args[0])
else:
raise
except SQLDidNotReturnSingleValue:
# the key does not exist, do an insert
next_sql = """
INSERT INTO crontabber (
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
) VALUES (
%(app_name)s,
%(next_run)s,
%(first_run)s,
%(last_run)s,
%(last_success)s,
%(depends_on)s,
%(error_count)s,
%(last_error)s,
%(ongoing)s
)
"""
parameters = {
'app_name': key,
'next_run': value['next_run'],
'first_run': value['first_run'],
'last_run': value['last_run'],
'last_success': value.get('last_success'),
'depends_on': value['depends_on'],
'error_count': value['error_count'],
'last_error': json.dumps(
value['last_error'],
cls=LastErrorEncoder
),
'ongoing': value.get('ongoing'),
}
try:
execute_no_results(
connection,
next_sql,
parameters
)
except IntegrityError as exception:
# See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
# we know to look for this mentioned in the error message.
if 'crontabber_unique_app_name_idx' in exception.args[0]:
raise RowLevelLockError(exception.args[0])
raise
@database_transaction()
def copy(self, connection):
sql = """SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error,
ongoing
FROM crontabber
"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error', 'ongoing'
)
all = {}
for record in execute_query_iter(connection, sql):
row = dict(zip(columns, record))
all[row.pop('app_name')] = row
return all
def update(self, data):
for key in data:
self[key] = data[key]
def get(self, key, default=None):
"""return the item by key or return 'default'"""
try:
return self[key]
except KeyError:
return default
@database_transaction()
def __delitem__(self, connection, key):
"""remove the item by key or raise KeyError"""
try:
# result intentionally ignored
single_value_sql(
connection,
"""SELECT app_name
FROM crontabber
WHERE
app_name = %s""",
(key,)
)
except SQLDidNotReturnSingleValue:
raise KeyError(key)
# item exists
execute_no_results(
connection,
"""DELETE FROM crontabber
WHERE app_name = %s""",
(key,)
)
|
mozilla/crontabber | crontabber/app.py | CronTabberBase.nagios | python | def nagios(self, stream=sys.stdout):
warnings = []
criticals = []
for class_name, job_class in self.config.crontabber.jobs.class_list:
if job_class.app_name in self.job_state_database:
info = self.job_state_database.get(job_class.app_name)
if not info.get('error_count', 0):
continue
error_count = info['error_count']
# trouble!
serialized = (
'%s (%s) | %s | %s' %
(job_class.app_name,
class_name,
info['last_error']['type'],
info['last_error']['value'])
)
if (
error_count == 1 and
hasattr(job_class, "_is_backfill_app")
):
# just a warning for now
warnings.append(serialized)
else:
# anything worse than that is critical
criticals.append(serialized)
if criticals:
stream.write('CRITICAL - ')
stream.write('; '.join(criticals))
stream.write('\n')
return 2
elif warnings:
stream.write('WARNING - ')
stream.write('; '.join(warnings))
stream.write('\n')
return 1
stream.write('OK - All systems nominal')
stream.write('\n')
return 0 | return 0 (OK) if there are no errors in the state.
return 1 (WARNING) if a backfill app only has 1 error.
return 2 (CRITICAL) if a backfill app has > 1 error.
return 2 (CRITICAL) if a non-backfill app has 1 error. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L876-L921 | null | class CronTabberBase(RequiredConfig):
app_name = 'crontabber'
app_version = __version__
app_description = __doc__
required_config = Namespace()
# the most important option, 'jobs', is defined last
required_config.namespace('crontabber')
required_config.crontabber.add_option(
name='job_state_db_class',
default=JobStateDatabase,
doc='Class to load and save the state and runs',
)
required_config.crontabber.add_option(
'jobs',
default='',
from_string_converter=classes_in_namespaces_converter_with_compression(
reference_namespace=Namespace(),
list_splitter_fn=line_splitter,
class_extractor=pipe_splitter,
extra_extractor=get_extra_as_options
)
)
required_config.crontabber.add_option(
'error_retry_time',
default=300,
doc='number of seconds to re-attempt a job that failed'
)
required_config.crontabber.add_option(
'max_ongoing_age_hours',
default=12.0,
doc=(
'If a job has been ongoing for longer than this, it gets '
'ignored as a lock and the job is run anyway.'
)
)
# for local use, independent of the JSONAndPostgresJobDatabase
required_config.crontabber.add_option(
'database_class',
default='crontabber.connection_factory.ConnectionFactory',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.crontabber.add_option(
'transaction_executor_class',
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.add_option(
name='job',
default='',
doc='Run a specific job',
short_form='j',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='list-jobs',
default=False,
doc='List all jobs',
short_form='l',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='force',
default=False,
doc='Force running a job despite dependencies',
short_form='f',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='configtest',
default=False,
doc='Check that all configured jobs are OK',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='sentrytest',
default=False,
doc='Send a sample raven exception',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='audit-ghosts',
default=False,
doc='Checks if there jobs in the database that is not configured.',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='reset-job',
default='',
doc='Pretend a job has never been run',
short_form='r',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='nagios',
default=False,
doc='Exits with 0, 1 or 2 with a message on stdout if errors have '
'happened.',
short_form='n',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='version',
default=False,
doc='Print current version and exit',
short_form='v',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.namespace('sentry')
required_config.sentry.add_option(
'dsn',
doc='DSN for Sentry via raven',
default='',
reference_value_from='secrets.sentry',
)
def __init__(self, config):
super(CronTabberBase, self).__init__(config)
self.database_connection_factory = \
self.config.crontabber.database_class(config.crontabber)
self.transaction_executor = (
self.config.crontabber.transaction_executor_class(
config.crontabber,
self.database_connection_factory
)
)
    def main(self):
        """Dispatch on the parsed command-line flags; return an exit code.

        Exit codes: 0 success, 2 the next job's row is locked by another
        crontabber, 3 the next job is already marked ongoing.  The
        ``configtest``/``sentrytest`` branches return 1 on failure.
        """
        if self.config.get('list-jobs'):
            self.list_jobs()
            return 0
        elif self.config.get('nagios'):
            return self.nagios()
        elif self.config.get('version'):
            self.print_version()
            return 0
        elif self.config.get('reset-job'):
            self.reset_job(self.config.get('reset-job'))
            return 0
        elif self.config.get('audit-ghosts'):
            self.audit_ghosts()
            return 0
        elif self.config.get('configtest'):
            # old-style conditional: 1 when configtest() fails, else 0
            return not self.configtest() and 1 or 0
        elif self.config.get('sentrytest'):
            return not self.sentrytest() and 1 or 0
        if self.config.get('job'):
            self.run_one(self.config['job'], self.config.get('force'))
        else:
            try:
                self.run_all()
            except RowLevelLockError:
                self.config.logger.debug(
                    'Next job to work on is already ongoing'
                )
                return 2
            except OngoingJobError:
                self.config.logger.debug(
                    'Next job to work on is already ongoing'
                )
                return 3
        return 0
    @staticmethod
    def _reorder_class_list(class_list):
        """Topologically sort (name, job_class) pairs by their
        ``depends_on`` attributes so dependencies run first.
        """
        # class_list looks something like this:
        # [('FooBarJob', <class 'FooBarJob'>),
        #  ('BarJob', <class 'BarJob'>),
        #  ('FooJob', <class 'FooJob'>)]
        return reorder_dag(
            class_list,
            depends_getter=lambda x: getattr(x[1], 'depends_on', None),
            name_getter=lambda x: x[1].app_name
        )
@property
def job_state_database(self):
if not getattr(self, '_job_state_database', None):
self._job_state_database = (
self.config.crontabber.job_state_db_class(
self.config.crontabber
)
)
return self._job_state_database
def print_version(self, stream=sys.stdout):
stream.write('%s\n' % self.app_version)
    def list_jobs(self, stream=None):
        """Pretty-print every configured job and its recorded state.

        Writes to *stream* (default sys.stdout).  For each job:
        class/app_name/frequency, then last run, last success, next run
        and any last error -- or a marker if it has never run.
        """
        if not stream:
            stream = sys.stdout
        _fmt = '%Y-%m-%d %H:%M:%S'
        _now = utc_now()
        PAD = 15  # width of the left-hand label column
        for class_name, job_class in self.config.crontabber.jobs.class_list:
            class_config = self.config.crontabber['class-%s' % class_name]
            freq = class_config.frequency
            if class_config.time:
                freq += ' @ %s' % class_config.time
            class_name = job_class.__module__ + '.' + job_class.__name__
            print >>stream, '=== JOB ' + '=' * 72
            print >>stream, 'Class:'.ljust(PAD), class_name
            print >>stream, 'App name:'.ljust(PAD), job_class.app_name
            print >>stream, 'Frequency:'.ljust(PAD), freq
            try:
                info = self.job_state_database[job_class.app_name]
            except KeyError:
                # never run before -> nothing more to report
                print >>stream, '*NO PREVIOUS RUN INFO*'
                continue
            if info.get('ongoing'):
                print >>stream, 'Ongoing now!'.ljust(PAD),
                print >>stream, 'Started', '%s ago' % timesince(
                    _now, info.get('ongoing')
                )
            print >>stream, 'Last run:'.ljust(PAD),
            if info['last_run']:
                print >>stream, info['last_run'].strftime(_fmt).ljust(20),
                print >>stream, '(%s ago)' % timesince(info['last_run'], _now)
            else:
                print >>stream, 'none'
            print >>stream, 'Last success:'.ljust(PAD),
            if info.get('last_success'):
                print >>stream, info['last_success'].strftime(_fmt).ljust(20),
                print >>stream, ('(%s ago)' %
                                 timesince(info['last_success'], _now))
            else:
                print >>stream, 'no previous successful run'
            print >>stream, 'Next run:'.ljust(PAD),
            if info['next_run']:
                print >>stream, info['next_run'].strftime(_fmt).ljust(20),
                if _now > info['next_run']:
                    print >>stream, ('(was %s ago)' %
                                     timesince(info['next_run'], _now))
                else:
                    print >>stream, '(in %s)' % timesince(
                        _now,
                        info['next_run']
                    )
            else:
                print >>stream, 'none'
            if info.get('last_error'):
                print >>stream, 'Error!!'.ljust(PAD),
                print >>stream, '(%s times)' % info['error_count']
                print >>stream, 'Traceback (most recent call last):'
                print >>stream, info['last_error']['traceback'],
                print >>stream, '%s:' % info['last_error']['type'],
                print >>stream, info['last_error']['value']
            print >>stream, ''
def reset_job(self, description):
"""remove the job from the state.
if means that next time we run, this job will start over from scratch.
"""
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
if job_class.app_name in self.job_state_database:
self.config.logger.info('App reset')
self.job_state_database.pop(job_class.app_name)
else:
self.config.logger.warning('App already reset')
return
raise JobNotFoundError(description)
def run_all(self):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config)
def run_one(self, description, force=False):
# the description in this case is either the app_name or the full
# module/class reference
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config, force=force)
return
raise JobNotFoundError(description)
    def _run_one(self, job_class, config, force=False):
        """Run a single job class if it is due and its dependencies are met.

        With ``force=True`` the schedule and dependency checks are
        skipped.  Successes and failures are recorded via
        _remember_success/_remember_failure and _log_run;
        OngoingJobError/RowLevelLockError propagate without being
        logged as a run.
        """
        _debug = self.config.logger.debug
        seconds = convert_frequency(config.frequency)
        time_ = config.time
        if not force:
            if not self.time_to_run(job_class, time_):
                _debug("skipping %r because it's not time to run", job_class)
                return
            ok, dependency_error = self.check_dependencies(job_class)
            if not ok:
                _debug(
                    "skipping %r dependencies aren't met [%s]",
                    job_class, dependency_error
                )
                return
        _debug('about to run %r', job_class)
        app_name = job_class.app_name
        info = self.job_state_database.get(app_name)
        last_success = None
        now = utc_now()
        log_run = True
        try:
            t0 = time.time()
            # _run_job is a generator: it yields once per completed run
            # (backfill jobs may yield several times)
            for last_success in self._run_job(job_class, config, info):
                t1 = time.time()
                _debug('successfully ran %r on %s', job_class, last_success)
                self._remember_success(job_class, last_success, t1 - t0)
                # _run_job() returns a generator, so we don't know how
                # many times this will loop. Anyway, we need to reset the
                # 't0' for the next loop if there is one.
                t0 = time.time()
            exc_type = exc_value = exc_tb = None
        except (OngoingJobError, RowLevelLockError):
            # It's not an actual runtime error. It just basically means
            # you can't start crontabber right now.
            log_run = False
            raise
        except:
            t1 = time.time()
            exc_type, exc_value, exc_tb = sys.exc_info()
            # when debugging tests that mock logging, uncomment this otherwise
            # the exc_info=True doesn't compute and record what the exception
            # was
            #raise  # noqa
            if self.config.sentry and self.config.sentry.dsn:
                assert raven, "raven not installed"
                try:
                    client = raven.Client(dsn=self.config.sentry.dsn)
                    identifier = client.get_ident(client.captureException())
                    self.config.logger.info(
                        'Error captured in Sentry. Reference: %s' % identifier
                    )
                except Exception:
                    # Blank exceptions like this is evil but a failure to send
                    # the exception to Sentry is much less important than for
                    # crontabber to carry on. This is especially true
                    # considering that raven depends on network I/O.
                    _debug('Failed to capture and send error to Sentry',
                           exc_info=True)
            _debug('error when running %r on %s',
                   job_class, last_success, exc_info=True)
            self._remember_failure(
                job_class,
                t1 - t0,
                exc_type,
                exc_value,
                exc_tb
            )
        finally:
            if log_run:
                self._log_run(
                    job_class,
                    seconds,
                    time_,
                    last_success,
                    now,
                    exc_type, exc_value, exc_tb
                )
@database_transaction()
def _remember_success(
    self,
    connection,
    class_,
    success_date,
    duration,
):
    """Insert one success row into the ``crontabber_log`` table.

    ``connection`` is injected by the ``database_transaction``
    decorator.  ``duration`` (seconds) is stored with 5 decimal places.
    """
    app_name = class_.app_name
    execute_no_results(
        connection,
        """INSERT INTO crontabber_log (
            app_name,
            success,
            duration
        ) VALUES (
            %s,
            %s,
            %s
        )""",
        (app_name, success_date, '%.5f' % duration),
    )
@database_transaction()
def _remember_failure(
    self,
    connection,
    class_,
    duration,
    exc_type,
    exc_value,
    exc_tb,
):
    """Insert one failure row (with formatted traceback) into ``crontabber_log``.

    ``connection`` is injected by the ``database_transaction`` decorator;
    the exception triple comes from ``sys.exc_info()``.
    """
    exc_traceback = ''.join(traceback.format_tb(exc_tb))
    app_name = class_.app_name
    execute_no_results(
        connection,
        """INSERT INTO crontabber_log (
            app_name,
            duration,
            exc_type,
            exc_value,
            exc_traceback
        ) VALUES (
            %s,
            %s,
            %s,
            %s,
            %s
        )""",
        (
            app_name,
            '%.5f' % duration,
            repr(exc_type),
            repr(exc_value),
            exc_traceback
        ),
    )
def check_dependencies(self, class_):
    """Return ``(ok, error_message)`` for the job's ``depends_on`` list.

    ``ok`` is False (with a human-readable reason) when any dependency
    has never run, errored on its last run, or is overdue for its next
    run.  A class without a ``depends_on`` attribute always passes.
    """
    try:
        depends_on = class_.depends_on
    except AttributeError:
        # that's perfectly fine -- the job has no dependencies
        return True, None
    if isinstance(depends_on, basestring):
        # a single dependency may be given as a plain string
        depends_on = [depends_on]
    for dependency in depends_on:
        try:
            job_info = self.job_state_database[dependency]
        except KeyError:
            # the job this one depends on hasn't been run yet!
            return False, "%r hasn't been run yet" % dependency
        if job_info.get('last_error'):
            # errored last time it ran
            return False, "%r errored last time it ran" % dependency
        if job_info['next_run'] < utc_now():
            # the dependency is overdue, i.e. it hasn't recently run
            return False, "%r hasn't recently run" % dependency
    # every dependency is satisfied
    return True, None
def time_to_run(self, class_, time_):
    """Return True if the job is due to run now.

    True when there is no previous run information, or when the
    previously recorded ``next_run`` timestamp is now in the past.
    ``time_`` is an optional 'HH:MM' wall-clock constraint used when the
    job has never run before.
    """
    app_name = class_.app_name
    try:
        info = self.job_state_database[app_name]
    except KeyError:
        if time_:
            h, m = [int(x) for x in time_.split(':')]
            # only run if this hour and minute is < now
            now = utc_now()
            if now.hour > h:
                return True
            elif now.hour == h and now.minute >= m:
                return True
            return False
        else:
            # no past information, run now
            return True
    next_run = info['next_run']
    if not next_run:
        # It has never run before.
        # If it has an active ongoing status it means two
        # independent threads tried to start it. The second one
        # (by a tiny time margin) will have a job_class whose
        # `ongoing` value has already been set.
        # If that's the case, let it through because it will
        # commence and break due to RowLevelLockError in the
        # state's __setitem__ method.
        return bool(info['ongoing'])
    if next_run < utc_now():
        return True
    return False
def _run_job(self, class_, config, info):
    """Instantiate the job, mark it ongoing, and hand control to its main()."""
    job = class_(config, info)
    self._set_ongoing_job(class_)
    return job.main()
def _set_ongoing_job(self, class_):
    """Mark ``class_`` as ongoing in the state database.

    Acts as an advisory lock: if the job is already marked ongoing and
    the mark is younger than ``max_ongoing_age_hours``, raise
    ``OngoingJobError``.  An older mark is considered stale and is
    overwritten.  For a never-seen job a fresh state record is created.
    """
    app_name = class_.app_name
    info = self.job_state_database.get(app_name)
    if info:
        # Was it already ongoing?
        if info.get('ongoing'):
            # Unless it's been ongoing for ages, raise OngoingJobError.
            # Bug fix: timedelta.seconds discards whole days, so a lock
            # older than 24h wrongly looked brand new and the job could
            # never be unblocked; total_seconds() measures the full age.
            age_hours = (
                (utc_now() - info['ongoing']).total_seconds() / 3600.0
            )
            if age_hours < self.config.crontabber.max_ongoing_age_hours:
                raise OngoingJobError(info['ongoing'])
            else:
                self.config.logger.debug(
                    '{} has been ongoing for {:2} hours. '
                    'Ignore it and running the app anyway.'.format(
                        app_name,
                        age_hours,
                    )
                )
        info['ongoing'] = utc_now()
    else:
        # First time this job is seen: build a fresh state record.
        depends_on = getattr(class_, 'depends_on', [])
        if isinstance(depends_on, basestring):
            depends_on = [depends_on]
        elif not isinstance(depends_on, list):
            depends_on = list(depends_on)
        info = {
            'next_run': None,
            'first_run': None,
            'last_run': None,
            'last_success': None,
            'last_error': {},
            'error_count': 0,
            'depends_on': depends_on,
            'ongoing': utc_now(),
        }
    self.job_state_database[app_name] = info
def _log_run(self, class_, seconds, time_, last_success, now,
             exc_type, exc_value, exc_tb):
    """Summarise a finished run into the job's state-database record.

    Updates first/last run timestamps, computes the next run time
    (retrying soon after an error, otherwise ``seconds`` from now,
    optionally pinned to the 'HH:MM' in ``time_``), records the last
    error (if any), and clears the ongoing flag.
    """
    assert inspect.isclass(class_)
    app_name = class_.app_name
    info = self.job_state_database.get(app_name, {})
    depends_on = getattr(class_, 'depends_on', [])
    if isinstance(depends_on, basestring):
        depends_on = [depends_on]
    elif not isinstance(depends_on, list):
        depends_on = list(depends_on)
    info['depends_on'] = depends_on
    if not info.get('first_run'):
        info['first_run'] = now
    info['last_run'] = now
    if last_success:
        info['last_success'] = last_success
    if exc_type:
        # it errored, try very soon again
        info['next_run'] = now + datetime.timedelta(
            seconds=self.config.crontabber.error_retry_time
        )
    else:
        info['next_run'] = now + datetime.timedelta(seconds=seconds)
        if time_:
            # pin the next run to the configured wall-clock time
            h, m = [int(x) for x in time_.split(':')]
            info['next_run'] = info['next_run'].replace(hour=h,
                                                        minute=m,
                                                        second=0,
                                                        microsecond=0)
    if exc_type:
        tb = ''.join(traceback.format_tb(exc_tb))
        info['last_error'] = {
            'type': exc_type,
            'value': str(exc_value),
            'traceback': tb,
        }
        info['error_count'] = info.get('error_count', 0) + 1
    else:
        info['last_error'] = {}
        info['error_count'] = 0
    # Clearly it's not "ongoing" any more when it's here, because
    # being here means the job has finished.
    info['ongoing'] = None
    self.job_state_database[app_name] = info
def configtest(self):
    """Return True when every configured job passes its config check.

    Walks the same dependency-ordered job list as run_all() but only
    validates each job's configuration instead of running it.
    """
    ordered = self._reorder_class_list(
        self.config.crontabber.jobs.class_list
    )
    results = [
        self._configtest_one(self.config.crontabber['class-%s' % name])
        for name, __ in ordered
    ]
    return all(results)
def _configtest_one(self, config):
    """Return True when one job's frequency/time configuration is valid.

    Any definition error is logged as critical and reported as False
    rather than raised.
    """
    try:
        seconds = convert_frequency(config.frequency)
        time_ = config.time
        if time_:
            check_time(time_)
            # if less than 1 day, it doesn't make sense to specify hour
            if seconds < 60 * 60 * 24:
                raise FrequencyDefinitionError(config.time)
        return True
    except (JobNotFoundError,
            JobDescriptionError,
            FrequencyDefinitionError,
            TimeDefinitionError):
        config.logger.critical(
            'Failed to config test a job',
            exc_info=True
        )
        return False
def sentrytest(self):
    """Send a sample message to Sentry; return True on success.

    Raises ``SentryConfigurationError`` when no DSN is configured.  A
    failure to determine the crontabber version is tolerated and only
    logged as a warning.
    """
    if not (self.config.sentry and self.config.sentry.dsn):
        raise SentryConfigurationError('sentry dsn not configured')
    try:
        version = raven.fetch_package_version('crontabber')
    except Exception:
        version = None
        self.config.logger.warning(
            'Unable to extract version of crontabber',
            exc_info=True
        )
    client = raven.Client(
        dsn=self.config.sentry.dsn,
        release=version
    )
    identifier = client.captureMessage(
        'Sentry test sent from crontabber'
    )
    self.config.logger.info(
        'Sentry successful identifier: %s', identifier
    )
    return True
def audit_ghosts(self):
    """Print app names found in the state database but not configured.

    The header line is only printed once, before the first ghost.
    Prints nothing when there are no ghosts.
    """
    print_header = True
    for app_name in self._get_ghosts():
        if print_header:
            print_header = False
            print (
                "Found the following in the state database but not "
                "available as a configured job:"
            )
        print "\t%s" % (app_name,)
def _get_ghosts(self):
    """Return app names present in the state database but not configured."""
    ordered = self._reorder_class_list(
        self.config.crontabber.jobs.class_list
    )
    configured = {job_class.app_name for __, job_class in ordered}
    return set(self.job_state_database.keys()) - configured
|
mozilla/crontabber | crontabber/app.py | CronTabberBase.reset_job | python | def reset_job(self, description):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
if job_class.app_name in self.job_state_database:
self.config.logger.info('App reset')
self.job_state_database.pop(job_class.app_name)
else:
self.config.logger.warning('App already reset')
return
raise JobNotFoundError(description) | remove the job from the state.
It means that next time we run, this job will start over from scratch. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L987-L1004 | null | class CronTabberBase(RequiredConfig):
# --- app identity and configman option definitions -----------------------
app_name = 'crontabber'
app_version = __version__
app_description = __doc__
required_config = Namespace()
# the most important option, 'jobs', is defined last
required_config.namespace('crontabber')
required_config.crontabber.add_option(
    name='job_state_db_class',
    default=JobStateDatabase,
    doc='Class to load and save the state and runs',
)
# 'jobs' is parsed from a newline-separated, pipe-delimited string into
# a namespace of job classes with per-class config.
required_config.crontabber.add_option(
    'jobs',
    default='',
    from_string_converter=classes_in_namespaces_converter_with_compression(
        reference_namespace=Namespace(),
        list_splitter_fn=line_splitter,
        class_extractor=pipe_splitter,
        extra_extractor=get_extra_as_options
    )
)
required_config.crontabber.add_option(
    'error_retry_time',
    default=300,
    doc='number of seconds to re-attempt a job that failed'
)
required_config.crontabber.add_option(
    'max_ongoing_age_hours',
    default=12.0,
    doc=(
        'If a job has been ongoing for longer than this, it gets '
        'ignored as a lock and the job is run anyway.'
    )
)
# for local use, independent of the JSONAndPostgresJobDatabase
required_config.crontabber.add_option(
    'database_class',
    default='crontabber.connection_factory.ConnectionFactory',
    from_string_converter=class_converter,
    reference_value_from='resource.postgresql'
)
required_config.crontabber.add_option(
    'transaction_executor_class',
    default='crontabber.transaction_executor.TransactionExecutor',
    doc='a class that will execute transactions',
    from_string_converter=class_converter,
    reference_value_from='resource.postgresql'
)
# --- command-line action switches (see main() for dispatch) --------------
required_config.add_option(
    name='job',
    default='',
    doc='Run a specific job',
    short_form='j',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='list-jobs',
    default=False,
    doc='List all jobs',
    short_form='l',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='force',
    default=False,
    doc='Force running a job despite dependencies',
    short_form='f',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='configtest',
    default=False,
    doc='Check that all configured jobs are OK',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='sentrytest',
    default=False,
    doc='Send a sample raven exception',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='audit-ghosts',
    default=False,
    doc='Checks if there jobs in the database that is not configured.',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='reset-job',
    default='',
    doc='Pretend a job has never been run',
    short_form='r',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='nagios',
    default=False,
    doc='Exits with 0, 1 or 2 with a message on stdout if errors have '
        'happened.',
    short_form='n',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
required_config.add_option(
    name='version',
    default=False,
    doc='Print current version and exit',
    short_form='v',
    exclude_from_print_conf=True,
    exclude_from_dump_conf=True,
)
# --- Sentry error reporting ----------------------------------------------
required_config.namespace('sentry')
required_config.sentry.add_option(
    'dsn',
    doc='DSN for Sentry via raven',
    default='',
    reference_value_from='secrets.sentry',
)
def __init__(self, config):
    """Store config and build the DB connection factory and transaction executor."""
    super(CronTabberBase, self).__init__(config)
    self.database_connection_factory = \
        self.config.crontabber.database_class(config.crontabber)
    self.transaction_executor = (
        self.config.crontabber.transaction_executor_class(
            config.crontabber,
            self.database_connection_factory
        )
    )
def main(self):
    """Dispatch on the command-line switches and return an exit code.

    Returns 0 on success, 2 when the next job is locked at row level,
    3 when the next job is already ongoing; configtest/sentrytest map
    their boolean result to 0/1.
    """
    if self.config.get('list-jobs'):
        self.list_jobs()
        return 0
    elif self.config.get('nagios'):
        return self.nagios()
    elif self.config.get('version'):
        self.print_version()
        return 0
    elif self.config.get('reset-job'):
        self.reset_job(self.config.get('reset-job'))
        return 0
    elif self.config.get('audit-ghosts'):
        self.audit_ghosts()
        return 0
    elif self.config.get('configtest'):
        return not self.configtest() and 1 or 0
    elif self.config.get('sentrytest'):
        return not self.sentrytest() and 1 or 0
    if self.config.get('job'):
        self.run_one(self.config['job'], self.config.get('force'))
    else:
        try:
            self.run_all()
        except RowLevelLockError:
            self.config.logger.debug(
                'Next job to work on is already ongoing'
            )
            return 2
        except OngoingJobError:
            self.config.logger.debug(
                'Next job to work on is already ongoing'
            )
            return 3
    return 0
@staticmethod
def _reorder_class_list(class_list):
    """Topologically sort ``(class_name, job_class)`` pairs.

    Ordering follows each class's ``depends_on`` attribute so a job
    always comes after the jobs it depends on.
    """
    # class_list looks something like this:
    # [('FooBarJob', <class 'FooBarJob'>),
    #  ('BarJob', <class 'BarJob'>),
    #  ('FooJob', <class 'FooJob'>)]
    return reorder_dag(
        class_list,
        depends_getter=lambda x: getattr(x[1], 'depends_on', None),
        name_getter=lambda x: x[1].app_name
    )
@property
def job_state_database(self):
    """Lazily create and cache the job-state database wrapper."""
    if not getattr(self, '_job_state_database', None):
        self._job_state_database = (
            self.config.crontabber.job_state_db_class(
                self.config.crontabber
            )
        )
    return self._job_state_database
def nagios(self, stream=sys.stdout):
    """
    return 0 (OK) if there are no errors in the state.
    return 1 (WARNING) if a backfill app only has 1 error.
    return 2 (CRITICAL) if a backfill app has > 1 error.
    return 2 (CRITICAL) if a non-backfill app has 1 error.

    One Nagios-formatted line is written to ``stream``.
    """
    warnings = []
    criticals = []
    for class_name, job_class in self.config.crontabber.jobs.class_list:
        if job_class.app_name in self.job_state_database:
            info = self.job_state_database.get(job_class.app_name)
            if not info.get('error_count', 0):
                continue
            error_count = info['error_count']
            # trouble!
            serialized = (
                '%s (%s) | %s | %s' %
                (job_class.app_name,
                 class_name,
                 info['last_error']['type'],
                 info['last_error']['value'])
            )
            if (
                error_count == 1 and
                hasattr(job_class, "_is_backfill_app")
            ):
                # just a warning for now
                warnings.append(serialized)
            else:
                # anything worse than that is critical
                criticals.append(serialized)
    if criticals:
        stream.write('CRITICAL - ')
        stream.write('; '.join(criticals))
        stream.write('\n')
        return 2
    elif warnings:
        stream.write('WARNING - ')
        stream.write('; '.join(warnings))
        stream.write('\n')
        return 1
    stream.write('OK - All systems nominal')
    stream.write('\n')
    return 0
def print_version(self, stream=sys.stdout):
    """Write the crontabber version, newline-terminated, to *stream*."""
    stream.write('{0}\n'.format(self.app_version))
def list_jobs(self, stream=None):
    """Print a human-readable report of every configured job to *stream*.

    For each job: class path, app name, frequency, plus (when the job has
    run before) last run, last success, next run, ongoing status and the
    last error with its traceback.  Uses Python 2 print-chevron syntax.
    """
    if not stream:
        stream = sys.stdout
    _fmt = '%Y-%m-%d %H:%M:%S'
    _now = utc_now()
    PAD = 15  # label column width
    for class_name, job_class in self.config.crontabber.jobs.class_list:
        class_config = self.config.crontabber['class-%s' % class_name]
        freq = class_config.frequency
        if class_config.time:
            freq += ' @ %s' % class_config.time
        class_name = job_class.__module__ + '.' + job_class.__name__
        print >>stream, '=== JOB ' + '=' * 72
        print >>stream, 'Class:'.ljust(PAD), class_name
        print >>stream, 'App name:'.ljust(PAD), job_class.app_name
        print >>stream, 'Frequency:'.ljust(PAD), freq
        try:
            info = self.job_state_database[job_class.app_name]
        except KeyError:
            print >>stream, '*NO PREVIOUS RUN INFO*'
            continue
        if info.get('ongoing'):
            print >>stream, 'Ongoing now!'.ljust(PAD),
            print >>stream, 'Started', '%s ago' % timesince(
                _now, info.get('ongoing')
            )
        print >>stream, 'Last run:'.ljust(PAD),
        if info['last_run']:
            print >>stream, info['last_run'].strftime(_fmt).ljust(20),
            print >>stream, '(%s ago)' % timesince(info['last_run'], _now)
        else:
            print >>stream, 'none'
        print >>stream, 'Last success:'.ljust(PAD),
        if info.get('last_success'):
            print >>stream, info['last_success'].strftime(_fmt).ljust(20),
            print >>stream, ('(%s ago)' %
                             timesince(info['last_success'], _now))
        else:
            print >>stream, 'no previous successful run'
        print >>stream, 'Next run:'.ljust(PAD),
        if info['next_run']:
            print >>stream, info['next_run'].strftime(_fmt).ljust(20),
            if _now > info['next_run']:
                print >>stream, ('(was %s ago)' %
                                 timesince(info['next_run'], _now))
            else:
                print >>stream, '(in %s)' % timesince(
                    _now,
                    info['next_run']
                )
        else:
            print >>stream, 'none'
        if info.get('last_error'):
            print >>stream, 'Error!!'.ljust(PAD),
            print >>stream, '(%s times)' % info['error_count']
            print >>stream, 'Traceback (most recent call last):'
            print >>stream, info['last_error']['traceback'],
            print >>stream, '%s:' % info['last_error']['type'],
            print >>stream, info['last_error']['value']
        print >>stream, ''
def run_all(self):
    """Run every configured job, in dependency order."""
    ordered = self._reorder_class_list(
        self.config.crontabber.jobs.class_list
    )
    for class_name, job_class in ordered:
        self._run_one(
            job_class,
            self.config.crontabber['class-%s' % class_name],
        )
def run_one(self, description, force=False):
    """Run the single job matching *description* (app_name or module.Class path)."""
    # the description in this case is either the app_name or the full
    # module/class reference
    class_list = self.config.crontabber.jobs.class_list
    class_list = self._reorder_class_list(class_list)
    for class_name, job_class in class_list:
        if (
            job_class.app_name == description or
            description == job_class.__module__ + '.' + job_class.__name__
        ):
            class_config = self.config.crontabber['class-%s' % class_name]
            self._run_one(job_class, class_config, force=force)
            return
    raise JobNotFoundError(description)
def _run_one(self, job_class, config, force=False):
    """Run one job class once, recording successes/failures and state."""
    _debug = self.config.logger.debug
    seconds = convert_frequency(config.frequency)
    time_ = config.time
    if not force:
        if not self.time_to_run(job_class, time_):
            _debug("skipping %r because it's not time to run", job_class)
            return
        ok, dependency_error = self.check_dependencies(job_class)
        if not ok:
            _debug(
                "skipping %r dependencies aren't met [%s]",
                job_class, dependency_error
            )
            return
    _debug('about to run %r', job_class)
    app_name = job_class.app_name
    info = self.job_state_database.get(app_name)
    last_success = None
    now = utc_now()
    log_run = True
    try:
        t0 = time.time()
        for last_success in self._run_job(job_class, config, info):
            t1 = time.time()
            _debug('successfully ran %r on %s', job_class, last_success)
            self._remember_success(job_class, last_success, t1 - t0)
            # _run_job() returns a generator, so we don't know how
            # many times this will loop. Anyway, we need to reset the
            # 't0' for the next loop if there is one.
            t0 = time.time()
        exc_type = exc_value = exc_tb = None
    except (OngoingJobError, RowLevelLockError):
        # It's not an actual runtime error. It just basically means
        # you can't start crontabber right now.
        log_run = False
        raise
    except:
        t1 = time.time()
        exc_type, exc_value, exc_tb = sys.exc_info()
        # when debugging tests that mock logging, uncomment this otherwise
        # the exc_info=True doesn't compute and record what the exception
        # was
        #raise  # noqa
        if self.config.sentry and self.config.sentry.dsn:
            assert raven, "raven not installed"
            try:
                client = raven.Client(dsn=self.config.sentry.dsn)
                identifier = client.get_ident(client.captureException())
                self.config.logger.info(
                    'Error captured in Sentry. Reference: %s' % identifier
                )
            except Exception:
                # Blank exceptions like this is evil but a failure to send
                # the exception to Sentry is much less important than for
                # crontabber to carry on. This is especially true
                # considering that raven depends on network I/O.
                _debug('Failed to capture and send error to Sentry',
                       exc_info=True)
        _debug('error when running %r on %s',
               job_class, last_success, exc_info=True)
        self._remember_failure(
            job_class,
            t1 - t0,
            exc_type,
            exc_value,
            exc_tb
        )
    finally:
        if log_run:
            self._log_run(
                job_class,
                seconds,
                time_,
                last_success,
                now,
                exc_type, exc_value, exc_tb
            )
@database_transaction()
def _remember_success(
    self,
    connection,
    class_,
    success_date,
    duration,
):
    """Insert one success row into the ``crontabber_log`` table."""
    app_name = class_.app_name
    execute_no_results(
        connection,
        """INSERT INTO crontabber_log (
            app_name,
            success,
            duration
        ) VALUES (
            %s,
            %s,
            %s
        )""",
        (app_name, success_date, '%.5f' % duration),
    )
@database_transaction()
def _remember_failure(
    self,
    connection,
    class_,
    duration,
    exc_type,
    exc_value,
    exc_tb,
):
    """Insert one failure row (with formatted traceback) into ``crontabber_log``."""
    exc_traceback = ''.join(traceback.format_tb(exc_tb))
    app_name = class_.app_name
    execute_no_results(
        connection,
        """INSERT INTO crontabber_log (
            app_name,
            duration,
            exc_type,
            exc_value,
            exc_traceback
        ) VALUES (
            %s,
            %s,
            %s,
            %s,
            %s
        )""",
        (
            app_name,
            '%.5f' % duration,
            repr(exc_type),
            repr(exc_value),
            exc_traceback
        ),
    )
def check_dependencies(self, class_):
    """Return ``(ok, error_message)`` for the job's ``depends_on`` list."""
    try:
        depends_on = class_.depends_on
    except AttributeError:
        # that's perfectly fine
        return True, None
    if isinstance(depends_on, basestring):
        depends_on = [depends_on]
    for dependency in depends_on:
        try:
            job_info = self.job_state_database[dependency]
        except KeyError:
            # the job this one depends on hasn't been run yet!
            return False, "%r hasn't been run yet" % dependency
        if job_info.get('last_error'):
            # errored last time it ran
            return False, "%r errored last time it ran" % dependency
        if job_info['next_run'] < utc_now():
            # the dependency hasn't recently run
            return False, "%r hasn't recently run" % dependency
    # every dependency is satisfied
    return True, None
def time_to_run(self, class_, time_):
    """Return True if the job is due to run now.

    True when there is no previous run information, or when the
    previously recorded ``next_run`` timestamp is now in the past.
    """
    app_name = class_.app_name
    try:
        info = self.job_state_database[app_name]
    except KeyError:
        if time_:
            h, m = [int(x) for x in time_.split(':')]
            # only run if this hour and minute is < now
            now = utc_now()
            if now.hour > h:
                return True
            elif now.hour == h and now.minute >= m:
                return True
            return False
        else:
            # no past information, run now
            return True
    next_run = info['next_run']
    if not next_run:
        # It has never run before.
        # If it has an active ongoing status it means two
        # independent threads tried to start it. The second one
        # (by a tiny time margin) will have a job_class whose
        # `ongoing` value has already been set.
        # If that's the case, let it through because it will
        # commence and break due to RowLevelLockError in the
        # state's __setitem__ method.
        return bool(info['ongoing'])
    if next_run < utc_now():
        return True
    return False
def _run_job(self, class_, config, info):
    """Instantiate the job, mark it ongoing and delegate to its main()."""
    # here we go!
    instance = class_(config, info)
    self._set_ongoing_job(class_)
    result = instance.main()
    return result
def _set_ongoing_job(self, class_):
    """Mark ``class_`` as ongoing; raise OngoingJobError if already locked.

    A lock older than ``max_ongoing_age_hours`` is considered stale and
    overwritten.
    """
    app_name = class_.app_name
    info = self.job_state_database.get(app_name)
    if info:
        # Was it already ongoing?
        if info.get('ongoing'):
            # Bug fix: timedelta.seconds discards whole days, making a
            # >24h-old lock look fresh forever; use total_seconds().
            age_hours = (
                (utc_now() - info['ongoing']).total_seconds() / 3600.0
            )
            if age_hours < self.config.crontabber.max_ongoing_age_hours:
                raise OngoingJobError(info['ongoing'])
            else:
                self.config.logger.debug(
                    '{} has been ongoing for {:2} hours. '
                    'Ignore it and running the app anyway.'.format(
                        app_name,
                        age_hours,
                    )
                )
        info['ongoing'] = utc_now()
    else:
        depends_on = getattr(class_, 'depends_on', [])
        if isinstance(depends_on, basestring):
            depends_on = [depends_on]
        elif not isinstance(depends_on, list):
            depends_on = list(depends_on)
        info = {
            'next_run': None,
            'first_run': None,
            'last_run': None,
            'last_success': None,
            'last_error': {},
            'error_count': 0,
            'depends_on': depends_on,
            'ongoing': utc_now(),
        }
    self.job_state_database[app_name] = info
def _log_run(self, class_, seconds, time_, last_success, now,
             exc_type, exc_value, exc_tb):
    """Summarise a finished run into the job's state-database record."""
    assert inspect.isclass(class_)
    app_name = class_.app_name
    info = self.job_state_database.get(app_name, {})
    depends_on = getattr(class_, 'depends_on', [])
    if isinstance(depends_on, basestring):
        depends_on = [depends_on]
    elif not isinstance(depends_on, list):
        depends_on = list(depends_on)
    info['depends_on'] = depends_on
    if not info.get('first_run'):
        info['first_run'] = now
    info['last_run'] = now
    if last_success:
        info['last_success'] = last_success
    if exc_type:
        # it errored, try very soon again
        info['next_run'] = now + datetime.timedelta(
            seconds=self.config.crontabber.error_retry_time
        )
    else:
        info['next_run'] = now + datetime.timedelta(seconds=seconds)
        if time_:
            h, m = [int(x) for x in time_.split(':')]
            info['next_run'] = info['next_run'].replace(hour=h,
                                                        minute=m,
                                                        second=0,
                                                        microsecond=0)
    if exc_type:
        tb = ''.join(traceback.format_tb(exc_tb))
        info['last_error'] = {
            'type': exc_type,
            'value': str(exc_value),
            'traceback': tb,
        }
        info['error_count'] = info.get('error_count', 0) + 1
    else:
        info['last_error'] = {}
        info['error_count'] = 0
    # Clearly it's not "ongoing" any more when it's here, because
    # being here means the job has finished.
    info['ongoing'] = None
    self.job_state_database[app_name] = info
def configtest(self):
    """Return True when every configured job passes its config check."""
    # similar to run_all() but don't actually run them
    failed = 0
    class_list = self.config.crontabber.jobs.class_list
    class_list = self._reorder_class_list(class_list)
    for class_name, __ in class_list:
        class_config = self.config.crontabber['class-%s' % class_name]
        if not self._configtest_one(class_config):
            failed += 1
    return not failed
def _configtest_one(self, config):
    """Return True when one job's frequency/time configuration is valid."""
    try:
        seconds = convert_frequency(config.frequency)
        time_ = config.time
        if time_:
            check_time(time_)
            # if less than 1 day, it doesn't make sense to specify hour
            if seconds < 60 * 60 * 24:
                raise FrequencyDefinitionError(config.time)
        return True
    except (JobNotFoundError,
            JobDescriptionError,
            FrequencyDefinitionError,
            TimeDefinitionError):
        config.logger.critical(
            'Failed to config test a job',
            exc_info=True
        )
        return False
def sentrytest(self):
    """Send a sample message to Sentry; return True on success."""
    if not (self.config.sentry and self.config.sentry.dsn):
        raise SentryConfigurationError('sentry dsn not configured')
    try:
        version = raven.fetch_package_version('crontabber')
    except Exception:
        version = None
        self.config.logger.warning(
            'Unable to extract version of crontabber',
            exc_info=True
        )
    client = raven.Client(
        dsn=self.config.sentry.dsn,
        release=version
    )
    identifier = client.captureMessage(
        'Sentry test sent from crontabber'
    )
    self.config.logger.info(
        'Sentry successful identifier: %s', identifier
    )
    return True
def audit_ghosts(self):
    """Print state-database app names that are not configured jobs."""
    print_header = True
    for app_name in self._get_ghosts():
        if print_header:
            print_header = False
            print (
                "Found the following in the state database but not "
                "available as a configured job:"
            )
        print "\t%s" % (app_name,)
def _get_ghosts(self):
    """Return app names present in the state database but not configured."""
    class_list = self.config.crontabber.jobs.class_list
    class_list = self._reorder_class_list(class_list)
    configured_app_names = []
    for __, job_class in class_list:
        configured_app_names.append(job_class.app_name)
    state_app_names = self.job_state_database.keys()
    return set(state_app_names) - set(configured_app_names)
|
mozilla/crontabber | crontabber/app.py | CronTabberBase.time_to_run | python | def time_to_run(self, class_, time_):
app_name = class_.app_name
try:
info = self.job_state_database[app_name]
except KeyError:
if time_:
h, m = [int(x) for x in time_.split(':')]
# only run if this hour and minute is < now
now = utc_now()
if now.hour > h:
return True
elif now.hour == h and now.minute >= m:
return True
return False
else:
# no past information, run now
return True
next_run = info['next_run']
if not next_run:
# It has never run before.
# If it has an active ongoing status it means two
# independent threads tried to start it. The second one
# (by a tiny time margin) will have a job_class whose
# `ongoing` value has already been set.
# If that's the case, let it through because it will
# commence and break due to RowLevelLockError in the
# state's __setitem__ method.
return bool(info['ongoing'])
if next_run < utc_now():
return True
return False | return true if it's time to run the job.
This is true if there is no previous information about its last run
or if the last time it ran and set its next_run to a date that is now
past. | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L1195-L1232 | [
"def utc_now():\n \"\"\"Return a timezone aware datetime instance in UTC timezone\n\n This funciton is mainly for convenience. Compare:\n\n >>> from datetimeutil import utc_now\n >>> utc_now()\n datetime.datetime(2012, 1, 5, 16, 42, 13, 639834,\n tzinfo=<isodate.tzinfo.Utc object... | class CronTabberBase(RequiredConfig):
app_name = 'crontabber'
app_version = __version__
app_description = __doc__
required_config = Namespace()
# the most important option, 'jobs', is defined last
required_config.namespace('crontabber')
required_config.crontabber.add_option(
name='job_state_db_class',
default=JobStateDatabase,
doc='Class to load and save the state and runs',
)
required_config.crontabber.add_option(
'jobs',
default='',
from_string_converter=classes_in_namespaces_converter_with_compression(
reference_namespace=Namespace(),
list_splitter_fn=line_splitter,
class_extractor=pipe_splitter,
extra_extractor=get_extra_as_options
)
)
required_config.crontabber.add_option(
'error_retry_time',
default=300,
doc='number of seconds to re-attempt a job that failed'
)
required_config.crontabber.add_option(
'max_ongoing_age_hours',
default=12.0,
doc=(
'If a job has been ongoing for longer than this, it gets '
'ignored as a lock and the job is run anyway.'
)
)
# for local use, independent of the JSONAndPostgresJobDatabase
required_config.crontabber.add_option(
'database_class',
default='crontabber.connection_factory.ConnectionFactory',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.crontabber.add_option(
'transaction_executor_class',
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.add_option(
name='job',
default='',
doc='Run a specific job',
short_form='j',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='list-jobs',
default=False,
doc='List all jobs',
short_form='l',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='force',
default=False,
doc='Force running a job despite dependencies',
short_form='f',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='configtest',
default=False,
doc='Check that all configured jobs are OK',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='sentrytest',
default=False,
doc='Send a sample raven exception',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='audit-ghosts',
default=False,
doc='Checks if there jobs in the database that is not configured.',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='reset-job',
default='',
doc='Pretend a job has never been run',
short_form='r',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='nagios',
default=False,
doc='Exits with 0, 1 or 2 with a message on stdout if errors have '
'happened.',
short_form='n',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='version',
default=False,
doc='Print current version and exit',
short_form='v',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.namespace('sentry')
required_config.sentry.add_option(
'dsn',
doc='DSN for Sentry via raven',
default='',
reference_value_from='secrets.sentry',
)
def __init__(self, config):
super(CronTabberBase, self).__init__(config)
self.database_connection_factory = \
self.config.crontabber.database_class(config.crontabber)
self.transaction_executor = (
self.config.crontabber.transaction_executor_class(
config.crontabber,
self.database_connection_factory
)
)
def main(self):
    """Dispatch on the command-line switches and return a process exit
    code: 0 = OK, 2 = row-level lock contention, 3 = a job is already
    ongoing; boolean sub-commands map success/failure to 0/1."""
    if self.config.get('list-jobs'):
        self.list_jobs()
        return 0
    elif self.config.get('nagios'):
        return self.nagios()
    elif self.config.get('version'):
        self.print_version()
        return 0
    elif self.config.get('reset-job'):
        self.reset_job(self.config.get('reset-job'))
        return 0
    elif self.config.get('audit-ghosts'):
        self.audit_ghosts()
        return 0
    elif self.config.get('configtest'):
        # configtest() returns True on success; invert into exit code
        return not self.configtest() and 1 or 0
    elif self.config.get('sentrytest'):
        return not self.sentrytest() and 1 or 0
    if self.config.get('job'):
        self.run_one(self.config['job'], self.config.get('force'))
    else:
        try:
            self.run_all()
        except RowLevelLockError:
            self.config.logger.debug(
                'Next job to work on is already ongoing'
            )
            return 2
        except OngoingJobError:
            self.config.logger.debug(
                'Next job to work on is already ongoing'
            )
            return 3
    return 0
@staticmethod
def _reorder_class_list(class_list):
    """Return *class_list* sorted so every job appears after the jobs
    it depends on (topological order via reorder_dag).

    class_list looks something like this::

        [('FooBarJob', <class 'FooBarJob'>),
         ('BarJob', <class 'BarJob'>),
         ('FooJob', <class 'FooJob'>)]
    """
    return reorder_dag(
        class_list,
        depends_getter=lambda x: getattr(x[1], 'depends_on', None),
        name_getter=lambda x: x[1].app_name
    )
@property
def job_state_database(self):
    """Lazily-created handle to the persisted per-job state (first/last
    run, next run, errors), cached on the instance after first access."""
    if not getattr(self, '_job_state_database', None):
        self._job_state_database = (
            self.config.crontabber.job_state_db_class(
                self.config.crontabber
            )
        )
    return self._job_state_database
def nagios(self, stream=sys.stdout):
    """
    return 0 (OK) if there are no errors in the state.
    return 1 (WARNING) if a backfill app only has 1 error.
    return 2 (CRITICAL) if a backfill app has > 1 error.
    return 2 (CRITICAL) if a non-backfill app has 1 error.
    """
    warnings = []
    criticals = []
    for class_name, job_class in self.config.crontabber.jobs.class_list:
        if job_class.app_name in self.job_state_database:
            info = self.job_state_database.get(job_class.app_name)
            if not info.get('error_count', 0):
                continue
            error_count = info['error_count']
            # trouble! build the "app (Class) | type | value" summary
            serialized = (
                '%s (%s) | %s | %s' %
                (job_class.app_name,
                 class_name,
                 info['last_error']['type'],
                 info['last_error']['value'])
            )
            if (
                error_count == 1 and
                hasattr(job_class, "_is_backfill_app")
            ):
                # a backfill app's first error is just a warning for now
                warnings.append(serialized)
            else:
                # anything worse than that is critical
                criticals.append(serialized)
    if criticals:
        stream.write('CRITICAL - ')
        stream.write('; '.join(criticals))
        stream.write('\n')
        return 2
    elif warnings:
        stream.write('WARNING - ')
        stream.write('; '.join(warnings))
        stream.write('\n')
        return 1
    stream.write('OK - All systems nominal')
    stream.write('\n')
    return 0
def print_version(self, stream=sys.stdout):
    """Write the application version, newline-terminated, to *stream*."""
    stream.write('{0}\n'.format(self.app_version))
def list_jobs(self, stream=None):
    """Print a human-readable report of every configured job to
    *stream* (default stdout): schedule, last/next run, last success
    and any recorded error.  NOTE: uses the Python 2 print statement
    (``print >>stream``)."""
    if not stream:
        stream = sys.stdout
    _fmt = '%Y-%m-%d %H:%M:%S'
    _now = utc_now()
    PAD = 15  # label column width
    for class_name, job_class in self.config.crontabber.jobs.class_list:
        class_config = self.config.crontabber['class-%s' % class_name]
        freq = class_config.frequency
        if class_config.time:
            freq += ' @ %s' % class_config.time
        class_name = job_class.__module__ + '.' + job_class.__name__
        print >>stream, '=== JOB ' + '=' * 72
        print >>stream, 'Class:'.ljust(PAD), class_name
        print >>stream, 'App name:'.ljust(PAD), job_class.app_name
        print >>stream, 'Frequency:'.ljust(PAD), freq
        try:
            info = self.job_state_database[job_class.app_name]
        except KeyError:
            # job is configured but has never run
            print >>stream, '*NO PREVIOUS RUN INFO*'
            continue
        if info.get('ongoing'):
            print >>stream, 'Ongoing now!'.ljust(PAD),
            print >>stream, 'Started', '%s ago' % timesince(
                _now, info.get('ongoing')
            )
        print >>stream, 'Last run:'.ljust(PAD),
        if info['last_run']:
            print >>stream, info['last_run'].strftime(_fmt).ljust(20),
            print >>stream, '(%s ago)' % timesince(info['last_run'], _now)
        else:
            print >>stream, 'none'
        print >>stream, 'Last success:'.ljust(PAD),
        if info.get('last_success'):
            print >>stream, info['last_success'].strftime(_fmt).ljust(20),
            print >>stream, ('(%s ago)' %
                             timesince(info['last_success'], _now))
        else:
            print >>stream, 'no previous successful run'
        print >>stream, 'Next run:'.ljust(PAD),
        if info['next_run']:
            print >>stream, info['next_run'].strftime(_fmt).ljust(20),
            if _now > info['next_run']:
                # overdue
                print >>stream, ('(was %s ago)' %
                                 timesince(info['next_run'], _now))
            else:
                print >>stream, '(in %s)' % timesince(
                    _now,
                    info['next_run']
                )
        else:
            print >>stream, 'none'
        if info.get('last_error'):
            print >>stream, 'Error!!'.ljust(PAD),
            print >>stream, '(%s times)' % info['error_count']
            print >>stream, 'Traceback (most recent call last):'
            print >>stream, info['last_error']['traceback'],
            print >>stream, '%s:' % info['last_error']['type'],
            print >>stream, info['last_error']['value']
        print >>stream, ''
def reset_job(self, description):
    """Forget all recorded state for the job matching *description*.

    *description* is either the job's app_name or its full
    "module.ClassName" path.  After a reset, the next crontabber run
    treats the job as if it had never run.  Raises JobNotFoundError
    when no configured job matches.
    """
    ordered = self._reorder_class_list(
        self.config.crontabber.jobs.class_list
    )
    for class_name, job_class in ordered:
        full_path = job_class.__module__ + '.' + job_class.__name__
        if description not in (job_class.app_name, full_path):
            continue
        if job_class.app_name in self.job_state_database:
            self.config.logger.info('App reset')
            self.job_state_database.pop(job_class.app_name)
        else:
            self.config.logger.warning('App already reset')
        return
    raise JobNotFoundError(description)
def run_all(self):
    """Run every configured job in dependency order; each job decides
    for itself inside _run_one() whether it is actually due."""
    class_list = self.config.crontabber.jobs.class_list
    class_list = self._reorder_class_list(class_list)
    for class_name, job_class in class_list:
        class_config = self.config.crontabber['class-%s' % class_name]
        self._run_one(job_class, class_config)
def run_one(self, description, force=False):
    """Run a single job; with force=True the schedule and dependency
    checks in _run_one() are skipped.  Raises JobNotFoundError when no
    configured job matches *description*."""
    # the description in this case is either the app_name or the full
    # module/class reference
    class_list = self.config.crontabber.jobs.class_list
    class_list = self._reorder_class_list(class_list)
    for class_name, job_class in class_list:
        if (
            job_class.app_name == description or
            description == job_class.__module__ + '.' + job_class.__name__
        ):
            class_config = self.config.crontabber['class-%s' % class_name]
            self._run_one(job_class, class_config, force=force)
            return
    raise JobNotFoundError(description)
def _run_one(self, job_class, config, force=False):
    """Run *job_class* if it is due (or *force* is set), recording a
    success row per yielded value and a failure row plus Sentry report
    on exceptions; always updates the state record via _log_run()
    unless the run never started (lock/ongoing errors)."""
    _debug = self.config.logger.debug
    seconds = convert_frequency(config.frequency)
    time_ = config.time
    if not force:
        if not self.time_to_run(job_class, time_):
            _debug("skipping %r because it's not time to run", job_class)
            return
        ok, dependency_error = self.check_dependencies(job_class)
        if not ok:
            _debug(
                "skipping %r dependencies aren't met [%s]",
                job_class, dependency_error
            )
            return
    _debug('about to run %r', job_class)
    app_name = job_class.app_name
    info = self.job_state_database.get(app_name)
    last_success = None
    now = utc_now()
    log_run = True
    try:
        t0 = time.time()
        for last_success in self._run_job(job_class, config, info):
            t1 = time.time()
            _debug('successfully ran %r on %s', job_class, last_success)
            self._remember_success(job_class, last_success, t1 - t0)
            # _run_job() returns a generator, so we don't know how
            # many times this will loop. Anyway, we need to reset the
            # 't0' for the next loop if there is one.
            t0 = time.time()
        exc_type = exc_value = exc_tb = None
    except (OngoingJobError, RowLevelLockError):
        # It's not an actual runtime error. It just basically means
        # you can't start crontabber right now; don't log a run.
        log_run = False
        raise
    except:
        t1 = time.time()
        exc_type, exc_value, exc_tb = sys.exc_info()
        # when debugging tests that mock logging, uncomment this otherwise
        # the exc_info=True doesn't compute and record what the exception
        # was
        #raise # noqa
        if self.config.sentry and self.config.sentry.dsn:
            assert raven, "raven not installed"
            try:
                client = raven.Client(dsn=self.config.sentry.dsn)
                identifier = client.get_ident(client.captureException())
                self.config.logger.info(
                    'Error captured in Sentry. Reference: %s' % identifier
                )
            except Exception:
                # Blank exceptions like this is evil but a failure to send
                # the exception to Sentry is much less important than for
                # crontabber to carry on. This is especially true
                # considering that raven depends on network I/O.
                _debug('Failed to capture and send error to Sentry',
                       exc_info=True)
        _debug('error when running %r on %s',
               job_class, last_success, exc_info=True)
        self._remember_failure(
            job_class,
            t1 - t0,
            exc_type,
            exc_value,
            exc_tb
        )
    finally:
        if log_run:
            self._log_run(
                job_class,
                seconds,
                time_,
                last_success,
                now,
                exc_type, exc_value, exc_tb
            )
@database_transaction()
def _remember_success(
    self,
    connection,
    class_,
    success_date,
    duration,
):
    """Insert a success row (app name, success date, duration in
    seconds) into crontabber_log; *connection* is supplied by the
    database_transaction decorator."""
    app_name = class_.app_name
    execute_no_results(
        connection,
        """INSERT INTO crontabber_log (
            app_name,
            success,
            duration
        ) VALUES (
            %s,
            %s,
            %s
        )""",
        (app_name, success_date, '%.5f' % duration),
    )
@database_transaction()
def _remember_failure(
    self,
    connection,
    class_,
    duration,
    exc_type,
    exc_value,
    exc_tb,
):
    """Insert a failure row (app name, duration, exception type/value
    and formatted traceback) into crontabber_log; *connection* is
    supplied by the database_transaction decorator."""
    exc_traceback = ''.join(traceback.format_tb(exc_tb))
    app_name = class_.app_name
    execute_no_results(
        connection,
        """INSERT INTO crontabber_log (
            app_name,
            duration,
            exc_type,
            exc_value,
            exc_traceback
        ) VALUES (
            %s,
            %s,
            %s,
            %s,
            %s
        )""",
        (
            app_name,
            '%.5f' % duration,
            repr(exc_type),
            repr(exc_value),
            exc_traceback
        ),
    )
def check_dependencies(self, class_):
    """Return (ok, error) describing whether *class_*'s dependencies
    allow it to run.

    ok is True when the job declares no ``depends_on``, or when every
    dependency has run, did not error last time, and is not itself
    overdue.  Otherwise ok is False and error is a human-readable
    reason string.
    """
    try:
        depends_on = class_.depends_on
    except AttributeError:
        # no dependencies declared; that's perfectly fine
        return True, None
    if isinstance(depends_on, basestring):
        depends_on = [depends_on]
    for dependency in depends_on:
        try:
            job_info = self.job_state_database[dependency]
        except KeyError:
            # the job this one depends on hasn't been run yet!
            return False, "%r hasn't been run yet" % dependency
        if job_info.get('last_error'):
            # errored last time it ran
            return False, "%r errored last time it ran" % dependency
        if job_info['next_run'] < utc_now():
            # the dependency is overdue (its scheduled next run is in
            # the past), i.e. it hasn't recently run
            return False, "%r hasn't recently run" % dependency
    # all dependencies satisfied; no reason to stop this job
    return True, None
def _run_job(self, class_, config, info):
    """Instantiate the job, mark it as ongoing in the state database,
    and return the result of its main() (iterated by _run_one)."""
    # here we go!
    instance = class_(config, info)
    self._set_ongoing_job(class_)
    result = instance.main()
    return result
def _set_ongoing_job(self, class_):
    """Mark *class_* as currently running in the state database.

    If the job is already marked ongoing and the marker is younger than
    ``max_ongoing_age_hours``, raise OngoingJobError so two crontabber
    processes don't run the same job concurrently.  An older marker is
    assumed to be left over from a crashed run and is overwritten.
    """
    app_name = class_.app_name
    info = self.job_state_database.get(app_name)
    if info:
        # Was it already ongoing?
        if info.get('ongoing'):
            # Unless it's been ongoing for ages, raise OngoingJobError.
            # BUGFIX: use total_seconds() — `.seconds` ignores the
            # `days` component of the timedelta, so a lock older than
            # 24 hours would wrongly look young and never be ignored.
            age_hours = (
                utc_now() - info['ongoing']
            ).total_seconds() / 3600.0
            if age_hours < self.config.crontabber.max_ongoing_age_hours:
                raise OngoingJobError(info['ongoing'])
            else:
                self.config.logger.debug(
                    '{} has been ongoing for {:2} hours. '
                    'Ignore it and running the app anyway.'.format(
                        app_name,
                        age_hours,
                    )
                )
        info['ongoing'] = utc_now()
    else:
        # first ever run: create a fresh state record
        depends_on = getattr(class_, 'depends_on', [])
        if isinstance(depends_on, basestring):
            depends_on = [depends_on]
        elif not isinstance(depends_on, list):
            depends_on = list(depends_on)
        info = {
            'next_run': None,
            'first_run': None,
            'last_run': None,
            'last_success': None,
            'last_error': {},
            'error_count': 0,
            'depends_on': depends_on,
            'ongoing': utc_now(),
        }
    self.job_state_database[app_name] = info
def _log_run(self, class_, seconds, time_, last_success, now,
             exc_type, exc_value, exc_tb):
    """Update the job's persisted state after a run attempt: record
    first/last run, last success, schedule next_run (short retry delay
    on error, otherwise frequency + optional fixed HH:MM), and record
    or clear the error details."""
    assert inspect.isclass(class_)
    app_name = class_.app_name
    info = self.job_state_database.get(app_name, {})
    depends_on = getattr(class_, 'depends_on', [])
    if isinstance(depends_on, basestring):
        depends_on = [depends_on]
    elif not isinstance(depends_on, list):
        depends_on = list(depends_on)
    info['depends_on'] = depends_on
    if not info.get('first_run'):
        info['first_run'] = now
    info['last_run'] = now
    if last_success:
        info['last_success'] = last_success
    if exc_type:
        # it errored, try very soon again
        info['next_run'] = now + datetime.timedelta(
            seconds=self.config.crontabber.error_retry_time
        )
    else:
        info['next_run'] = now + datetime.timedelta(seconds=seconds)
        if time_:
            # pin the next run to the configured HH:MM wall-clock time
            h, m = [int(x) for x in time_.split(':')]
            info['next_run'] = info['next_run'].replace(hour=h,
                                                        minute=m,
                                                        second=0,
                                                        microsecond=0)
    if exc_type:
        tb = ''.join(traceback.format_tb(exc_tb))
        info['last_error'] = {
            'type': exc_type,
            'value': str(exc_value),
            'traceback': tb,
        }
        info['error_count'] = info.get('error_count', 0) + 1
    else:
        info['last_error'] = {}
        info['error_count'] = 0
    # Clearly it's not "ongoing" any more when it's here, because
    # being here means the job has finished.
    info['ongoing'] = None
    self.job_state_database[app_name] = info
def configtest(self):
    """Return True if every configured job passes its config test.

    Mirrors run_all() but only validates each job's frequency/time
    configuration instead of executing the jobs.
    """
    ordered = self._reorder_class_list(
        self.config.crontabber.jobs.class_list
    )
    bad = 0
    for class_name, __ in ordered:
        if not self._configtest_one(
            self.config.crontabber['class-%s' % class_name]
        ):
            bad += 1
    return not bad
def _configtest_one(self, config):
    """Validate a single job's frequency/time configuration; return
    True on success, log critically and return False on any known
    configuration error."""
    try:
        seconds = convert_frequency(config.frequency)
        time_ = config.time
        if time_:
            check_time(time_)
            # if less than 1 day, it doesn't make sense to also pin the
            # run to a specific hour
            if seconds < 60 * 60 * 24:
                raise FrequencyDefinitionError(config.time)
        return True
    except (JobNotFoundError,
            JobDescriptionError,
            FrequencyDefinitionError,
            TimeDefinitionError):
        config.logger.critical(
            'Failed to config test a job',
            exc_info=True
        )
        return False
def sentrytest(self):
    """return true if we managed to send a sample raven exception"""
    if not (self.config.sentry and self.config.sentry.dsn):
        raise SentryConfigurationError('sentry dsn not configured')
    try:
        version = raven.fetch_package_version('crontabber')
    except Exception:
        # the version is only cosmetic metadata on the Sentry event;
        # keep going without it
        version = None
        self.config.logger.warning(
            'Unable to extract version of crontabber',
            exc_info=True
        )
    client = raven.Client(
        dsn=self.config.sentry.dsn,
        release=version
    )
    identifier = client.captureMessage(
        'Sentry test sent from crontabber'
    )
    self.config.logger.info(
        'Sentry successful identifier: %s', identifier
    )
    return True
def audit_ghosts(self):
    """compare the list of configured jobs with the jobs in the state

    Prints (Python 2 print statement) every app_name that exists in
    the state database but is no longer a configured job.
    """
    print_header = True
    for app_name in self._get_ghosts():
        if print_header:
            # only print the banner once, before the first ghost
            print_header = False
            print (
                "Found the following in the state database but not "
                "available as a configured job:"
            )
        print "\t%s" % (app_name,)
def _get_ghosts(self):
    """Return the set of app_names present in the state database but
    absent from the configured job list."""
    configured = set(
        job_class.app_name
        for __, job_class in self._reorder_class_list(
            self.config.crontabber.jobs.class_list
        )
    )
    return set(self.job_state_database.keys()) - configured
|
mozilla/crontabber | crontabber/app.py | CronTabberBase.audit_ghosts | python | def audit_ghosts(self):
print_header = True
for app_name in self._get_ghosts():
if print_header:
print_header = False
print (
"Found the following in the state database but not "
"available as a configured job:"
)
print "\t%s" % (app_name,) | compare the list of configured jobs with the jobs in the state | train | https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L1385-L1395 | null | class CronTabberBase(RequiredConfig):
app_name = 'crontabber'
app_version = __version__
app_description = __doc__
required_config = Namespace()
# the most important option, 'jobs', is defined last
required_config.namespace('crontabber')
required_config.crontabber.add_option(
name='job_state_db_class',
default=JobStateDatabase,
doc='Class to load and save the state and runs',
)
required_config.crontabber.add_option(
'jobs',
default='',
from_string_converter=classes_in_namespaces_converter_with_compression(
reference_namespace=Namespace(),
list_splitter_fn=line_splitter,
class_extractor=pipe_splitter,
extra_extractor=get_extra_as_options
)
)
required_config.crontabber.add_option(
'error_retry_time',
default=300,
doc='number of seconds to re-attempt a job that failed'
)
required_config.crontabber.add_option(
'max_ongoing_age_hours',
default=12.0,
doc=(
'If a job has been ongoing for longer than this, it gets '
'ignored as a lock and the job is run anyway.'
)
)
# for local use, independent of the JSONAndPostgresJobDatabase
required_config.crontabber.add_option(
'database_class',
default='crontabber.connection_factory.ConnectionFactory',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.crontabber.add_option(
'transaction_executor_class',
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from='resource.postgresql'
)
required_config.add_option(
name='job',
default='',
doc='Run a specific job',
short_form='j',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='list-jobs',
default=False,
doc='List all jobs',
short_form='l',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='force',
default=False,
doc='Force running a job despite dependencies',
short_form='f',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='configtest',
default=False,
doc='Check that all configured jobs are OK',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='sentrytest',
default=False,
doc='Send a sample raven exception',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='audit-ghosts',
default=False,
doc='Checks if there jobs in the database that is not configured.',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='reset-job',
default='',
doc='Pretend a job has never been run',
short_form='r',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='nagios',
default=False,
doc='Exits with 0, 1 or 2 with a message on stdout if errors have '
'happened.',
short_form='n',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.add_option(
name='version',
default=False,
doc='Print current version and exit',
short_form='v',
exclude_from_print_conf=True,
exclude_from_dump_conf=True,
)
required_config.namespace('sentry')
required_config.sentry.add_option(
'dsn',
doc='DSN for Sentry via raven',
default='',
reference_value_from='secrets.sentry',
)
def __init__(self, config):
super(CronTabberBase, self).__init__(config)
self.database_connection_factory = \
self.config.crontabber.database_class(config.crontabber)
self.transaction_executor = (
self.config.crontabber.transaction_executor_class(
config.crontabber,
self.database_connection_factory
)
)
def main(self):
if self.config.get('list-jobs'):
self.list_jobs()
return 0
elif self.config.get('nagios'):
return self.nagios()
elif self.config.get('version'):
self.print_version()
return 0
elif self.config.get('reset-job'):
self.reset_job(self.config.get('reset-job'))
return 0
elif self.config.get('audit-ghosts'):
self.audit_ghosts()
return 0
elif self.config.get('configtest'):
return not self.configtest() and 1 or 0
elif self.config.get('sentrytest'):
return not self.sentrytest() and 1 or 0
if self.config.get('job'):
self.run_one(self.config['job'], self.config.get('force'))
else:
try:
self.run_all()
except RowLevelLockError:
self.config.logger.debug(
'Next job to work on is already ongoing'
)
return 2
except OngoingJobError:
self.config.logger.debug(
'Next job to work on is already ongoing'
)
return 3
return 0
@staticmethod
def _reorder_class_list(class_list):
# class_list looks something like this:
# [('FooBarJob', <class 'FooBarJob'>),
# ('BarJob', <class 'BarJob'>),
# ('FooJob', <class 'FooJob'>)]
return reorder_dag(
class_list,
depends_getter=lambda x: getattr(x[1], 'depends_on', None),
name_getter=lambda x: x[1].app_name
)
@property
def job_state_database(self):
if not getattr(self, '_job_state_database', None):
self._job_state_database = (
self.config.crontabber.job_state_db_class(
self.config.crontabber
)
)
return self._job_state_database
def nagios(self, stream=sys.stdout):
"""
return 0 (OK) if there are no errors in the state.
return 1 (WARNING) if a backfill app only has 1 error.
return 2 (CRITICAL) if a backfill app has > 1 error.
return 2 (CRITICAL) if a non-backfill app has 1 error.
"""
warnings = []
criticals = []
for class_name, job_class in self.config.crontabber.jobs.class_list:
if job_class.app_name in self.job_state_database:
info = self.job_state_database.get(job_class.app_name)
if not info.get('error_count', 0):
continue
error_count = info['error_count']
# trouble!
serialized = (
'%s (%s) | %s | %s' %
(job_class.app_name,
class_name,
info['last_error']['type'],
info['last_error']['value'])
)
if (
error_count == 1 and
hasattr(job_class, "_is_backfill_app")
):
# just a warning for now
warnings.append(serialized)
else:
# anything worse than that is critical
criticals.append(serialized)
if criticals:
stream.write('CRITICAL - ')
stream.write('; '.join(criticals))
stream.write('\n')
return 2
elif warnings:
stream.write('WARNING - ')
stream.write('; '.join(warnings))
stream.write('\n')
return 1
stream.write('OK - All systems nominal')
stream.write('\n')
return 0
def print_version(self, stream=sys.stdout):
stream.write('%s\n' % self.app_version)
def list_jobs(self, stream=None):
if not stream:
stream = sys.stdout
_fmt = '%Y-%m-%d %H:%M:%S'
_now = utc_now()
PAD = 15
for class_name, job_class in self.config.crontabber.jobs.class_list:
class_config = self.config.crontabber['class-%s' % class_name]
freq = class_config.frequency
if class_config.time:
freq += ' @ %s' % class_config.time
class_name = job_class.__module__ + '.' + job_class.__name__
print >>stream, '=== JOB ' + '=' * 72
print >>stream, 'Class:'.ljust(PAD), class_name
print >>stream, 'App name:'.ljust(PAD), job_class.app_name
print >>stream, 'Frequency:'.ljust(PAD), freq
try:
info = self.job_state_database[job_class.app_name]
except KeyError:
print >>stream, '*NO PREVIOUS RUN INFO*'
continue
if info.get('ongoing'):
print >>stream, 'Ongoing now!'.ljust(PAD),
print >>stream, 'Started', '%s ago' % timesince(
_now, info.get('ongoing')
)
print >>stream, 'Last run:'.ljust(PAD),
if info['last_run']:
print >>stream, info['last_run'].strftime(_fmt).ljust(20),
print >>stream, '(%s ago)' % timesince(info['last_run'], _now)
else:
print >>stream, 'none'
print >>stream, 'Last success:'.ljust(PAD),
if info.get('last_success'):
print >>stream, info['last_success'].strftime(_fmt).ljust(20),
print >>stream, ('(%s ago)' %
timesince(info['last_success'], _now))
else:
print >>stream, 'no previous successful run'
print >>stream, 'Next run:'.ljust(PAD),
if info['next_run']:
print >>stream, info['next_run'].strftime(_fmt).ljust(20),
if _now > info['next_run']:
print >>stream, ('(was %s ago)' %
timesince(info['next_run'], _now))
else:
print >>stream, '(in %s)' % timesince(
_now,
info['next_run']
)
else:
print >>stream, 'none'
if info.get('last_error'):
print >>stream, 'Error!!'.ljust(PAD),
print >>stream, '(%s times)' % info['error_count']
print >>stream, 'Traceback (most recent call last):'
print >>stream, info['last_error']['traceback'],
print >>stream, '%s:' % info['last_error']['type'],
print >>stream, info['last_error']['value']
print >>stream, ''
def reset_job(self, description):
"""remove the job from the state.
if means that next time we run, this job will start over from scratch.
"""
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
if job_class.app_name in self.job_state_database:
self.config.logger.info('App reset')
self.job_state_database.pop(job_class.app_name)
else:
self.config.logger.warning('App already reset')
return
raise JobNotFoundError(description)
def run_all(self):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config)
def run_one(self, description, force=False):
# the description in this case is either the app_name or the full
# module/class reference
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, job_class in class_list:
if (
job_class.app_name == description or
description == job_class.__module__ + '.' + job_class.__name__
):
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config, force=force)
return
raise JobNotFoundError(description)
def _run_one(self, job_class, config, force=False):
_debug = self.config.logger.debug
seconds = convert_frequency(config.frequency)
time_ = config.time
if not force:
if not self.time_to_run(job_class, time_):
_debug("skipping %r because it's not time to run", job_class)
return
ok, dependency_error = self.check_dependencies(job_class)
if not ok:
_debug(
"skipping %r dependencies aren't met [%s]",
job_class, dependency_error
)
return
_debug('about to run %r', job_class)
app_name = job_class.app_name
info = self.job_state_database.get(app_name)
last_success = None
now = utc_now()
log_run = True
try:
t0 = time.time()
for last_success in self._run_job(job_class, config, info):
t1 = time.time()
_debug('successfully ran %r on %s', job_class, last_success)
self._remember_success(job_class, last_success, t1 - t0)
# _run_job() returns a generator, so we don't know how
# many times this will loop. Anyway, we need to reset the
# 't0' for the next loop if there is one.
t0 = time.time()
exc_type = exc_value = exc_tb = None
except (OngoingJobError, RowLevelLockError):
# It's not an actual runtime error. It just basically means
# you can't start crontabber right now.
log_run = False
raise
except:
t1 = time.time()
exc_type, exc_value, exc_tb = sys.exc_info()
# when debugging tests that mock logging, uncomment this otherwise
# the exc_info=True doesn't compute and record what the exception
# was
#raise # noqa
if self.config.sentry and self.config.sentry.dsn:
assert raven, "raven not installed"
try:
client = raven.Client(dsn=self.config.sentry.dsn)
identifier = client.get_ident(client.captureException())
self.config.logger.info(
'Error captured in Sentry. Reference: %s' % identifier
)
except Exception:
# Blank exceptions like this is evil but a failure to send
# the exception to Sentry is much less important than for
# crontabber to carry on. This is especially true
# considering that raven depends on network I/O.
_debug('Failed to capture and send error to Sentry',
exc_info=True)
_debug('error when running %r on %s',
job_class, last_success, exc_info=True)
self._remember_failure(
job_class,
t1 - t0,
exc_type,
exc_value,
exc_tb
)
finally:
if log_run:
self._log_run(
job_class,
seconds,
time_,
last_success,
now,
exc_type, exc_value, exc_tb
)
@database_transaction()
def _remember_success(
self,
connection,
class_,
success_date,
duration,
):
app_name = class_.app_name
execute_no_results(
connection,
"""INSERT INTO crontabber_log (
app_name,
success,
duration
) VALUES (
%s,
%s,
%s
)""",
(app_name, success_date, '%.5f' % duration),
)
@database_transaction()
def _remember_failure(
self,
connection,
class_,
duration,
exc_type,
exc_value,
exc_tb,
):
exc_traceback = ''.join(traceback.format_tb(exc_tb))
app_name = class_.app_name
execute_no_results(
connection,
"""INSERT INTO crontabber_log (
app_name,
duration,
exc_type,
exc_value,
exc_traceback
) VALUES (
%s,
%s,
%s,
%s,
%s
)""",
(
app_name,
'%.5f' % duration,
repr(exc_type),
repr(exc_value),
exc_traceback
),
)
def check_dependencies(self, class_):
try:
depends_on = class_.depends_on
except AttributeError:
# that's perfectly fine
return True, None
if isinstance(depends_on, basestring):
depends_on = [depends_on]
for dependency in depends_on:
try:
job_info = self.job_state_database[dependency]
except KeyError:
# the job this one depends on hasn't been run yet!
return False, "%r hasn't been run yet" % dependency
if job_info.get('last_error'):
# errored last time it ran
return False, "%r errored last time it ran" % dependency
if job_info['next_run'] < utc_now():
# the dependency hasn't recently run
return False, "%r hasn't recently run" % dependency
# no reason not to stop this class
return True, None
def time_to_run(self, class_, time_):
    """return true if it's time to run the job.

    This is true if there is no previous information about its last run
    or if the last time it ran and set its next_run to a date that is now
    past.
    """
    app_name = class_.app_name
    try:
        info = self.job_state_database[app_name]
    except KeyError:
        # never run before
        if time_:
            h, m = [int(x) for x in time_.split(':')]
            # only run if this hour and minute is < now
            now = utc_now()
            if now.hour > h:
                return True
            elif now.hour == h and now.minute >= m:
                return True
            return False
        else:
            # no past information, run now
            return True
    next_run = info['next_run']
    if not next_run:
        # It has never run before.
        # If it has an active ongoing status it means two
        # independent threads tried to start it. The second one
        # (by a tiny time margin) will have a job_class whose
        # `ongoing` value has already been set.
        # If that's the case, let it through because it will
        # commence and break due to RowLevelLockError in the
        # state's __setitem__ method.
        return bool(info['ongoing'])
    if next_run < utc_now():
        return True
    return False
def _run_job(self, class_, config, info):
# here we go!
instance = class_(config, info)
self._set_ongoing_job(class_)
result = instance.main()
return result
def _set_ongoing_job(self, class_):
    """Mark *class_* as currently running in the state database.

    If the job is already marked ongoing and the marker is younger than
    ``max_ongoing_age_hours``, raise OngoingJobError so two crontabber
    processes don't run the same job concurrently.  An older marker is
    assumed to be left over from a crashed run and is overwritten.
    """
    app_name = class_.app_name
    info = self.job_state_database.get(app_name)
    if info:
        # Was it already ongoing?
        if info.get('ongoing'):
            # Unless it's been ongoing for ages, raise OngoingJobError.
            # BUGFIX: use total_seconds() — `.seconds` ignores the
            # `days` component of the timedelta, so a lock older than
            # 24 hours would wrongly look young and never be ignored.
            age_hours = (
                utc_now() - info['ongoing']
            ).total_seconds() / 3600.0
            if age_hours < self.config.crontabber.max_ongoing_age_hours:
                raise OngoingJobError(info['ongoing'])
            else:
                self.config.logger.debug(
                    '{} has been ongoing for {:2} hours. '
                    'Ignore it and running the app anyway.'.format(
                        app_name,
                        age_hours,
                    )
                )
        info['ongoing'] = utc_now()
    else:
        # first ever run: create a fresh state record
        depends_on = getattr(class_, 'depends_on', [])
        if isinstance(depends_on, basestring):
            depends_on = [depends_on]
        elif not isinstance(depends_on, list):
            depends_on = list(depends_on)
        info = {
            'next_run': None,
            'first_run': None,
            'last_run': None,
            'last_success': None,
            'last_error': {},
            'error_count': 0,
            'depends_on': depends_on,
            'ongoing': utc_now(),
        }
    self.job_state_database[app_name] = info
def _log_run(self, class_, seconds, time_, last_success, now,
exc_type, exc_value, exc_tb):
assert inspect.isclass(class_)
app_name = class_.app_name
info = self.job_state_database.get(app_name, {})
depends_on = getattr(class_, 'depends_on', [])
if isinstance(depends_on, basestring):
depends_on = [depends_on]
elif not isinstance(depends_on, list):
depends_on = list(depends_on)
info['depends_on'] = depends_on
if not info.get('first_run'):
info['first_run'] = now
info['last_run'] = now
if last_success:
info['last_success'] = last_success
if exc_type:
# it errored, try very soon again
info['next_run'] = now + datetime.timedelta(
seconds=self.config.crontabber.error_retry_time
)
else:
info['next_run'] = now + datetime.timedelta(seconds=seconds)
if time_:
h, m = [int(x) for x in time_.split(':')]
info['next_run'] = info['next_run'].replace(hour=h,
minute=m,
second=0,
microsecond=0)
if exc_type:
tb = ''.join(traceback.format_tb(exc_tb))
info['last_error'] = {
'type': exc_type,
'value': str(exc_value),
'traceback': tb,
}
info['error_count'] = info.get('error_count', 0) + 1
else:
info['last_error'] = {}
info['error_count'] = 0
# Clearly it's not "ongoing" any more when it's here, because
# being here means the job has finished.
info['ongoing'] = None
self.job_state_database[app_name] = info
def configtest(self):
"""return true if all configured jobs are configured OK"""
# similar to run_all() but don't actually run them
failed = 0
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
for class_name, __ in class_list:
class_config = self.config.crontabber['class-%s' % class_name]
if not self._configtest_one(class_config):
failed += 1
return not failed
def _configtest_one(self, config):
try:
seconds = convert_frequency(config.frequency)
time_ = config.time
if time_:
check_time(time_)
# if less than 1 day, it doesn't make sense to specify hour
if seconds < 60 * 60 * 24:
raise FrequencyDefinitionError(config.time)
return True
except (JobNotFoundError,
JobDescriptionError,
FrequencyDefinitionError,
TimeDefinitionError):
config.logger.critical(
'Failed to config test a job',
exc_info=True
)
return False
def sentrytest(self):
"""return true if we managed to send a sample raven exception"""
if not (self.config.sentry and self.config.sentry.dsn):
raise SentryConfigurationError('sentry dsn not configured')
try:
version = raven.fetch_package_version('crontabber')
except Exception:
version = None
self.config.logger.warning(
'Unable to extract version of crontabber',
exc_info=True
)
client = raven.Client(
dsn=self.config.sentry.dsn,
release=version
)
identifier = client.captureMessage(
'Sentry test sent from crontabber'
)
self.config.logger.info(
'Sentry successful identifier: %s', identifier
)
return True
def _get_ghosts(self):
class_list = self.config.crontabber.jobs.class_list
class_list = self._reorder_class_list(class_list)
configured_app_names = []
for __, job_class in class_list:
configured_app_names.append(job_class.app_name)
state_app_names = self.job_state_database.keys()
return set(state_app_names) - set(configured_app_names)
|
lightning-viz/lightning-python | lightning/main.py | Lightning.enable_ipython | python | def enable_ipython(self, **kwargs):
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
if not self.quiet:
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text)) | Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L52-L83 | [
"def get_ipython_markup_link(self):\n return '%s/js/ipython-comm.js' % self.host\n",
"def set_size(self, size='medium'):\n \"\"\"\n Set a figure size using one of four options.\n\n Convention is 'small': 400px, 'medium': 600px, 'large': 800px,\n and 'full' will use the entire width\n \"\"\"\n ... | class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium', quiet=False):
self.quiet = quiet
if not self.quiet:
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def disable_ipython(self):
"""
Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook.
"""
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None)
def create_session(self, name=None):
"""
Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated.
"""
self.session = Session.create(self, name=name)
return self.session
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_basic_auth(self, username, password):
"""
Set authenatication.
"""
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def check_status(self):
"""
Check the server for status
"""
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") |
lightning-viz/lightning-python | lightning/main.py | Lightning.disable_ipython | python | def disable_ipython(self):
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None) | Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L85-L98 | null | class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium', quiet=False):
self.quiet = quiet
if not self.quiet:
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def enable_ipython(self, **kwargs):
"""
Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session.
"""
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
if not self.quiet:
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text))
def create_session(self, name=None):
"""
Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated.
"""
self.session = Session.create(self, name=name)
return self.session
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_basic_auth(self, username, password):
"""
Set authenatication.
"""
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def check_status(self):
"""
Check the server for status
"""
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") |
lightning-viz/lightning-python | lightning/main.py | Lightning.create_session | python | def create_session(self, name=None):
self.session = Session.create(self, name=name)
return self.session | Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L100-L108 | [
"def create(cls, lgn, name=None):\n url = lgn.host + '/sessions/'\n\n payload = {}\n if name:\n payload = {'name': name}\n\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n r = requests.post(url, data=json.dumps(payload), headers=headers, auth=lgn.auth)\n return cls... | class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium', quiet=False):
self.quiet = quiet
if not self.quiet:
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def enable_ipython(self, **kwargs):
"""
Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session.
"""
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
if not self.quiet:
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text))
def disable_ipython(self):
"""
Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook.
"""
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None)
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_basic_auth(self, username, password):
"""
Set authenatication.
"""
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def check_status(self):
"""
Check the server for status
"""
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") |
lightning-viz/lightning-python | lightning/main.py | Lightning.use_session | python | def use_session(self, session_id):
self.session = Session(lgn=self, id=session_id)
return self.session | Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L110-L118 | null | class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium', quiet=False):
self.quiet = quiet
if not self.quiet:
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def enable_ipython(self, **kwargs):
"""
Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session.
"""
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
if not self.quiet:
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text))
def disable_ipython(self):
"""
Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook.
"""
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None)
def create_session(self, name=None):
"""
Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated.
"""
self.session = Session.create(self, name=name)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_basic_auth(self, username, password):
"""
Set authenatication.
"""
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def check_status(self):
"""
Check the server for status
"""
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") |
lightning-viz/lightning-python | lightning/main.py | Lightning.set_basic_auth | python | def set_basic_auth(self, username, password):
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self | Set authenatication. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L136-L142 | null | class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium', quiet=False):
self.quiet = quiet
if not self.quiet:
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def enable_ipython(self, **kwargs):
"""
Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session.
"""
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
if not self.quiet:
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text))
def disable_ipython(self):
"""
Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook.
"""
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None)
def create_session(self, name=None):
"""
Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated.
"""
self.session = Session.create(self, name=name)
return self.session
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def check_status(self):
"""
Check the server for status
"""
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") |
lightning-viz/lightning-python | lightning/main.py | Lightning.set_host | python | def set_host(self, host):
if host[-1] == '/':
host = host[:-1]
self.host = host
return self | Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L144-L156 | null | class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium', quiet=False):
self.quiet = quiet
if not self.quiet:
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def enable_ipython(self, **kwargs):
"""
Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session.
"""
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
if not self.quiet:
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text))
def disable_ipython(self):
"""
Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook.
"""
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None)
def create_session(self, name=None):
"""
Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated.
"""
self.session = Session.create(self, name=name)
return self.session
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_basic_auth(self, username, password):
"""
Set authenatication.
"""
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def check_status(self):
"""
Check the server for status
"""
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") |
lightning-viz/lightning-python | lightning/main.py | Lightning.check_status | python | def check_status(self):
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False | Check the server for status | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/main.py#L169-L188 | null | class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium', quiet=False):
self.quiet = quiet
if not self.quiet:
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def enable_ipython(self, **kwargs):
"""
Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session.
"""
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
if not self.quiet:
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text))
def disable_ipython(self):
"""
Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook.
"""
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None)
def create_session(self, name=None):
"""
Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated.
"""
self.session = Session.create(self, name=name)
return self.session
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_basic_auth(self, username, password):
"""
Set authenatication.
"""
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") |
lightning-viz/lightning-python | lightning/types/base.py | Base._clean_data | python | def _clean_data(cls, *args, **kwargs):
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data | Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays). | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L80-L106 | [
"def _ensure_dict_or_list(x):\n\n if isinstance(x, dict):\n return x\n\n if isinstance(x, list):\n return x\n\n if isinstance(x, str):\n return x\n\n if isinstance(x, (int, float, complex)):\n return x\n\n try:\n # convert numpy arrays to lists\n return x.tol... | class Base(Visualization, VisualizationLocal):
_name = 'base'
_options = {
'width': {'default': None},
'height': {'default': None},
'description': {'default': None}
}
_doc = """
width : int, optional, default=None
Width of visualization in pixels.
height : int, optional, default=None
Height of visualization in pixels.
description : str, optional, default=None
Markdown formatted text to show with visualization
when displayed in a Lightning server.
"""
_data_dict_inputs = {}
@classmethod
def _check_unkeyed_arrays(cls, key, val):
if key not in cls._data_dict_inputs:
return val
if not isinstance(val, list):
raise Exception("Must provide a list")
if len(val) == 0:
return val
if isinstance(val[0], dict) and isinstance(val[-1], dict):
return val
if isinstance(val[0], list) and isinstance(val[-1], list):
# if both the first and last elements are lists
out = []
mapping = cls._data_dict_inputs[key]
for l in val:
out.append(dict(zip(mapping, l)))
return out
@staticmethod
def _ensure_dict_or_list(x):
if isinstance(x, dict):
return x
if isinstance(x, list):
return x
if isinstance(x, str):
return x
if isinstance(x, (int, float, complex)):
return x
try:
# convert numpy arrays to lists
return x.tolist()
except Exception:
pass
# add other data type conversions here
raise Exception("Could not convert to correct data type")
@classmethod
@classmethod
def _clean_options(cls, **kwargs):
options = {}
description = None
if hasattr(cls, '_options'):
for key, value in six.iteritems(kwargs):
if key in cls._options:
lgn_option = cls._options[key].get('name', key)
options[lgn_option] = value
if key == 'description':
description = value
return options, description
@classmethod
def _baseplot_local(cls, type, *args, **kwargs):
data = cls._clean_data(*args)
options, description = cls._clean_options(**kwargs)
payload = {'type': type, 'options': options}
if 'images' in data:
payload['images'] = data['images']
else:
payload['data'] = data
viz = VisualizationLocal._create(**payload)
return viz
@classmethod
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz
def update(self, *args, **kwargs):
"""
Base method for updating data.
Applies a plot-type specific cleaning operation, then
updates the data in the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._update_image(img)
else:
self._update_data(data=data)
def append(self, *args, **kwargs):
"""
Base method for appending data.
Applies a plot-type specific cleaning operation, then
appends data to the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._append_image(img)
else:
self._append_data(data=data)
def _get_user_data(self):
"""
Base method for retrieving user data from a viz.
"""
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/settings/'
r = requests.get(url)
if r.status_code == 200:
content = r.json()
else:
raise Exception('Error retrieving user data from server')
return content
|
lightning-viz/lightning-python | lightning/types/base.py | Base._baseplot | python | def _baseplot(cls, session, type, *args, **kwargs):
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz | Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L141-L179 | null | class Base(Visualization, VisualizationLocal):
_name = 'base'
_options = {
'width': {'default': None},
'height': {'default': None},
'description': {'default': None}
}
_doc = """
width : int, optional, default=None
Width of visualization in pixels.
height : int, optional, default=None
Height of visualization in pixels.
description : str, optional, default=None
Markdown formatted text to show with visualization
when displayed in a Lightning server.
"""
_data_dict_inputs = {}
@classmethod
def _check_unkeyed_arrays(cls, key, val):
if key not in cls._data_dict_inputs:
return val
if not isinstance(val, list):
raise Exception("Must provide a list")
if len(val) == 0:
return val
if isinstance(val[0], dict) and isinstance(val[-1], dict):
return val
if isinstance(val[0], list) and isinstance(val[-1], list):
# if both the first and last elements are lists
out = []
mapping = cls._data_dict_inputs[key]
for l in val:
out.append(dict(zip(mapping, l)))
return out
@staticmethod
def _ensure_dict_or_list(x):
if isinstance(x, dict):
return x
if isinstance(x, list):
return x
if isinstance(x, str):
return x
if isinstance(x, (int, float, complex)):
return x
try:
# convert numpy arrays to lists
return x.tolist()
except Exception:
pass
# add other data type conversions here
raise Exception("Could not convert to correct data type")
@classmethod
def _clean_data(cls, *args, **kwargs):
"""
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
"""
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data
@classmethod
def _clean_options(cls, **kwargs):
options = {}
description = None
if hasattr(cls, '_options'):
for key, value in six.iteritems(kwargs):
if key in cls._options:
lgn_option = cls._options[key].get('name', key)
options[lgn_option] = value
if key == 'description':
description = value
return options, description
@classmethod
def _baseplot_local(cls, type, *args, **kwargs):
data = cls._clean_data(*args)
options, description = cls._clean_options(**kwargs)
payload = {'type': type, 'options': options}
if 'images' in data:
payload['images'] = data['images']
else:
payload['data'] = data
viz = VisualizationLocal._create(**payload)
return viz
@classmethod
def update(self, *args, **kwargs):
"""
Base method for updating data.
Applies a plot-type specific cleaning operation, then
updates the data in the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._update_image(img)
else:
self._update_data(data=data)
def append(self, *args, **kwargs):
"""
Base method for appending data.
Applies a plot-type specific cleaning operation, then
appends data to the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._append_image(img)
else:
self._append_data(data=data)
def _get_user_data(self):
"""
Base method for retrieving user data from a viz.
"""
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/settings/'
r = requests.get(url)
if r.status_code == 200:
content = r.json()
else:
raise Exception('Error retrieving user data from server')
return content
|
lightning-viz/lightning-python | lightning/types/base.py | Base.update | python | def update(self, *args, **kwargs):
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._update_image(img)
else:
self._update_data(data=data) | Base method for updating data.
Applies a plot-type specific cleaning operation, then
updates the data in the visualization. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L181-L195 | [
"def _clean_data(cls, *args, **kwargs):\n \"\"\"\n Convert raw data into a dictionary with plot-type specific methods.\n\n The result of the cleaning operation should be a dictionary.\n If the dictionary contains a 'data' field it will be passed directly\n (ensuring appropriate formatting). Otherwise... | class Base(Visualization, VisualizationLocal):
_name = 'base'
_options = {
'width': {'default': None},
'height': {'default': None},
'description': {'default': None}
}
_doc = """
width : int, optional, default=None
Width of visualization in pixels.
height : int, optional, default=None
Height of visualization in pixels.
description : str, optional, default=None
Markdown formatted text to show with visualization
when displayed in a Lightning server.
"""
_data_dict_inputs = {}
@classmethod
def _check_unkeyed_arrays(cls, key, val):
if key not in cls._data_dict_inputs:
return val
if not isinstance(val, list):
raise Exception("Must provide a list")
if len(val) == 0:
return val
if isinstance(val[0], dict) and isinstance(val[-1], dict):
return val
if isinstance(val[0], list) and isinstance(val[-1], list):
# if both the first and last elements are lists
out = []
mapping = cls._data_dict_inputs[key]
for l in val:
out.append(dict(zip(mapping, l)))
return out
@staticmethod
def _ensure_dict_or_list(x):
if isinstance(x, dict):
return x
if isinstance(x, list):
return x
if isinstance(x, str):
return x
if isinstance(x, (int, float, complex)):
return x
try:
# convert numpy arrays to lists
return x.tolist()
except Exception:
pass
# add other data type conversions here
raise Exception("Could not convert to correct data type")
@classmethod
def _clean_data(cls, *args, **kwargs):
"""
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
"""
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data
@classmethod
def _clean_options(cls, **kwargs):
options = {}
description = None
if hasattr(cls, '_options'):
for key, value in six.iteritems(kwargs):
if key in cls._options:
lgn_option = cls._options[key].get('name', key)
options[lgn_option] = value
if key == 'description':
description = value
return options, description
@classmethod
def _baseplot_local(cls, type, *args, **kwargs):
data = cls._clean_data(*args)
options, description = cls._clean_options(**kwargs)
payload = {'type': type, 'options': options}
if 'images' in data:
payload['images'] = data['images']
else:
payload['data'] = data
viz = VisualizationLocal._create(**payload)
return viz
@classmethod
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz
def append(self, *args, **kwargs):
"""
Base method for appending data.
Applies a plot-type specific cleaning operation, then
appends data to the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._append_image(img)
else:
self._append_data(data=data)
def _get_user_data(self):
"""
Base method for retrieving user data from a viz.
"""
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/settings/'
r = requests.get(url)
if r.status_code == 200:
content = r.json()
else:
raise Exception('Error retrieving user data from server')
return content
|
lightning-viz/lightning-python | lightning/types/base.py | Base.append | python | def append(self, *args, **kwargs):
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._append_image(img)
else:
self._append_data(data=data) | Base method for appending data.
Applies a plot-type specific cleaning operation, then
appends data to the visualization. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L197-L211 | [
"def _clean_data(cls, *args, **kwargs):\n \"\"\"\n Convert raw data into a dictionary with plot-type specific methods.\n\n The result of the cleaning operation should be a dictionary.\n If the dictionary contains a 'data' field it will be passed directly\n (ensuring appropriate formatting). Otherwise... | class Base(Visualization, VisualizationLocal):
_name = 'base'
_options = {
'width': {'default': None},
'height': {'default': None},
'description': {'default': None}
}
_doc = """
width : int, optional, default=None
Width of visualization in pixels.
height : int, optional, default=None
Height of visualization in pixels.
description : str, optional, default=None
Markdown formatted text to show with visualization
when displayed in a Lightning server.
"""
_data_dict_inputs = {}
@classmethod
def _check_unkeyed_arrays(cls, key, val):
if key not in cls._data_dict_inputs:
return val
if not isinstance(val, list):
raise Exception("Must provide a list")
if len(val) == 0:
return val
if isinstance(val[0], dict) and isinstance(val[-1], dict):
return val
if isinstance(val[0], list) and isinstance(val[-1], list):
# if both the first and last elements are lists
out = []
mapping = cls._data_dict_inputs[key]
for l in val:
out.append(dict(zip(mapping, l)))
return out
@staticmethod
def _ensure_dict_or_list(x):
if isinstance(x, dict):
return x
if isinstance(x, list):
return x
if isinstance(x, str):
return x
if isinstance(x, (int, float, complex)):
return x
try:
# convert numpy arrays to lists
return x.tolist()
except Exception:
pass
# add other data type conversions here
raise Exception("Could not convert to correct data type")
@classmethod
def _clean_data(cls, *args, **kwargs):
"""
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
"""
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data
@classmethod
def _clean_options(cls, **kwargs):
options = {}
description = None
if hasattr(cls, '_options'):
for key, value in six.iteritems(kwargs):
if key in cls._options:
lgn_option = cls._options[key].get('name', key)
options[lgn_option] = value
if key == 'description':
description = value
return options, description
@classmethod
def _baseplot_local(cls, type, *args, **kwargs):
data = cls._clean_data(*args)
options, description = cls._clean_options(**kwargs)
payload = {'type': type, 'options': options}
if 'images' in data:
payload['images'] = data['images']
else:
payload['data'] = data
viz = VisualizationLocal._create(**payload)
return viz
@classmethod
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz
def update(self, *args, **kwargs):
"""
Base method for updating data.
Applies a plot-type specific cleaning operation, then
updates the data in the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._update_image(img)
else:
self._update_data(data=data)
def _get_user_data(self):
"""
Base method for retrieving user data from a viz.
"""
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/settings/'
r = requests.get(url)
if r.status_code == 200:
content = r.json()
else:
raise Exception('Error retrieving user data from server')
return content
|
lightning-viz/lightning-python | lightning/types/base.py | Base._get_user_data | python | def _get_user_data(self):
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/settings/'
r = requests.get(url)
if r.status_code == 200:
content = r.json()
else:
raise Exception('Error retrieving user data from server')
return content | Base method for retrieving user data from a viz. | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L213-L225 | null | class Base(Visualization, VisualizationLocal):
_name = 'base'
_options = {
'width': {'default': None},
'height': {'default': None},
'description': {'default': None}
}
_doc = """
width : int, optional, default=None
Width of visualization in pixels.
height : int, optional, default=None
Height of visualization in pixels.
description : str, optional, default=None
Markdown formatted text to show with visualization
when displayed in a Lightning server.
"""
_data_dict_inputs = {}
@classmethod
def _check_unkeyed_arrays(cls, key, val):
if key not in cls._data_dict_inputs:
return val
if not isinstance(val, list):
raise Exception("Must provide a list")
if len(val) == 0:
return val
if isinstance(val[0], dict) and isinstance(val[-1], dict):
return val
if isinstance(val[0], list) and isinstance(val[-1], list):
# if both the first and last elements are lists
out = []
mapping = cls._data_dict_inputs[key]
for l in val:
out.append(dict(zip(mapping, l)))
return out
@staticmethod
def _ensure_dict_or_list(x):
if isinstance(x, dict):
return x
if isinstance(x, list):
return x
if isinstance(x, str):
return x
if isinstance(x, (int, float, complex)):
return x
try:
# convert numpy arrays to lists
return x.tolist()
except Exception:
pass
# add other data type conversions here
raise Exception("Could not convert to correct data type")
@classmethod
def _clean_data(cls, *args, **kwargs):
"""
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
"""
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data
@classmethod
def _clean_options(cls, **kwargs):
options = {}
description = None
if hasattr(cls, '_options'):
for key, value in six.iteritems(kwargs):
if key in cls._options:
lgn_option = cls._options[key].get('name', key)
options[lgn_option] = value
if key == 'description':
description = value
return options, description
@classmethod
def _baseplot_local(cls, type, *args, **kwargs):
data = cls._clean_data(*args)
options, description = cls._clean_options(**kwargs)
payload = {'type': type, 'options': options}
if 'images' in data:
payload['images'] = data['images']
else:
payload['data'] = data
viz = VisualizationLocal._create(**payload)
return viz
@classmethod
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz
def update(self, *args, **kwargs):
"""
Base method for updating data.
Applies a plot-type specific cleaning operation, then
updates the data in the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._update_image(img)
else:
self._update_data(data=data)
def append(self, *args, **kwargs):
"""
Base method for appending data.
Applies a plot-type specific cleaning operation, then
appends data to the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._append_image(img)
else:
self._append_data(data=data)
|
lightning-viz/lightning-python | lightning/types/utils.py | check_property | python | def check_property(prop, name, **kwargs):
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop | Check and parse a property with either a specific checking function
or a generic parser | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L16-L39 | [
"def check_1d(x, name):\n \"\"\"\n Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]\n \"\"\"\n\n x = asarray(x)\n if size(x) == 1:\n x = asarray([x])\n if x.ndim == 2:\n raise Exception(\"Property: %s must be one-dimensional\" % name)\n x = x.f... | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | check_coordinates | python | def check_coordinates(co, xy=None):
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co | Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]] | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L42-L53 | null | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | check_color | python | def check_color(c):
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c | Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...] | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L59-L74 | null | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | check_colormap | python | def check_colormap(cmap):
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap | Check if cmap is one of the colorbrewer maps | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L77-L88 | null | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
    """
    Derive the link list for a graph given either an adjacency matrix
    (square input) or an explicit link list with 2 (source, target)
    or 3 (source, target, value) columns.
    """
    data = asarray(data)
    if data.shape[0] == data.shape[1]:
        # square input: treat as adjacency matrix
        return mat_to_links(data)
    width = len(data[0])
    if width == 2:
        # no values given; default every link value to 1
        return concatenate((data, ones((len(data), 1))), axis=1)
    if width == 3:
        return data
    raise ValueError("Too many entries per link, must be 2 or 3, got %g" % width)
def array_to_im(im):
    """
    Render an image array as PNG bytes.

    2-d input is rendered as grayscale; 3-d input is rendered as RGB.

    Parameters
    ----------
    im : array-like
        Image data with 2 or 3 dimensions.

    Returns
    -------
    bytes
        PNG-encoded image data.

    Raises
    ------
    Exception
        If the input does not have 2 or 3 dimensions.
    """
    from matplotlib.pyplot import imsave
    from matplotlib.pyplot import cm
    import io
    im = asarray(im)
    # validate before encoding — the original checked ndim only after
    # calling imsave, so invalid input was rejected after a failed write
    if im.ndim not in (2, 3):
        raise Exception("Images must be 2 or 3 dimensions")
    imfile = io.BytesIO()
    if im.ndim == 3:
        # if 3D, show as RGB
        imsave(imfile, im, format="png")
    else:
        # if 2D, show as grayscale
        imsave(imfile, im, format="png", cmap=cm.gray)
    return imfile.getvalue()
def list_to_regions(reg):
    """
    Normalize region input to a list of region codes: two-letter (US)
    or three-letter (world) names.
    """
    if isinstance(reg, str):
        return [reg]
    if isinstance(reg, list):
        # every name must be uniformly two letters or uniformly three
        lengths = set(len(name) for name in reg)
        if lengths not in (set(), {2}, {3}):
            raise Exception("All region names must be two letters (for US) or three letters (for world)")
        return reg
def polygon_to_mask(coords, dims, z=None):
    """
    Given a list of pairs of points which define a polygon, return a binary
    mask covering the interior of the polygon with dimensions dims.

    Parameters
    ----------
    coords : array-like
        Polygon vertices as (x, y) pairs.
    dims : tuple
        Output mask dimensions; must have a third entry when z is given.
    z : int, optional
        If given, embed the 2-d mask at this index of the third dimension.
    """
    bounds = array(coords).astype('int')
    path = Path(bounds)
    grid = meshgrid(range(dims[1]), range(dims[0]))
    # materialize the coordinate pairs: contains_points needs an Nx2
    # array-like, and zip() is a one-shot iterator on Python 3
    grid_flat = list(zip(grid[0].ravel(), grid[1].ravel()))
    mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
    if z is not None:
        if len(dims) < 3:
            raise Exception('Dims must have three-dimensions for embedding z-index')
        if z >= dims[2]:
            raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
        tmp = zeros(dims)
        tmp[:, :, z] = mask
        mask = tmp
    return mask
def polygon_to_points(coords, z=None):
    """
    Given a list of pairs of points which define a polygon,
    return a list of points interior to the polygon.

    Parameters
    ----------
    coords : array-like
        Polygon vertices as (x, y) pairs.
    z : int, optional
        If given, append z as a third coordinate to every point.
    """
    bounds = array(coords).astype('int')
    bmax = bounds.max(0)
    bmin = bounds.min(0)
    path = Path(bounds)
    # only test the polygon's bounding box, offsetting results by bmin below
    grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
    # materialize the coordinate pairs: contains_points needs an Nx2
    # array-like, and zip() is a one-shot iterator on Python 3
    grid_flat = list(zip(grid[0].ravel(), grid[1].ravel()))
    points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
    points = where(points)
    points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
    if z is not None:
        # build a real list (the original returned a lazy map iterator on
        # Python 3, contradicting the documented list return)
        points = [[p[0], p[1], z] for p in points]
    return points
lightning-viz/lightning-python | lightning/types/utils.py | check_size | python | def check_size(s):
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s | Check and parse size specs as either a single [s] or a list of [s,s,s,...] | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L91-L100 | [
"def check_1d(x, name):\n \"\"\"\n Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]\n \"\"\"\n\n x = asarray(x)\n if size(x) == 1:\n x = asarray([x])\n if x.ndim == 2:\n raise Exception(\"Property: %s must be one-dimensional\" % name)\n x = x.f... | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | check_thickness | python | def check_thickness(s):
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s | Check and parse thickness specs as either a single [s] or a list of [s,s,s,...] | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L102-L111 | [
"def check_1d(x, name):\n \"\"\"\n Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]\n \"\"\"\n\n x = asarray(x)\n if size(x) == 1:\n x = asarray([x])\n if x.ndim == 2:\n raise Exception(\"Property: %s must be one-dimensional\" % name)\n x = x.f... | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | check_index | python | def check_index(i):
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i | Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...] | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L113-L122 | null | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | check_alpha | python | def check_alpha(a):
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a | Check and parse alpha specs as either a single [a] or a list of [a,a,a,...] | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L125-L134 | [
"def check_1d(x, name):\n \"\"\"\n Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]\n \"\"\"\n\n x = asarray(x)\n if size(x) == 1:\n x = asarray([x])\n if x.ndim == 2:\n raise Exception(\"Property: %s must be one-dimensional\" % name)\n x = x.f... | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.