repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
hyde/fswrap | fswrap.py | Folder.child | python | def child(self, fragment):
return os.path.join(self.path, FS(fragment).path) | Returns a path of a child item represented by `fragment`. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L543-L547 | null | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
hyde/fswrap | fswrap.py | Folder.make | python | def make(self):
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self | Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L549-L560 | null | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
hyde/fswrap | fswrap.py | Folder.zip | python | def zip(self, target=None, basepath=None):
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath)) | Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L562-L576 | null | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
hyde/fswrap | fswrap.py | Folder.delete | python | def delete(self):
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path) | Deletes the directory if it exists. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L578-L584 | null | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
hyde/fswrap | fswrap.py | Folder.move_to | python | def move_to(self, destination):
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target | Moves this directory to the given destination. Returns a Folder object
that represents the moved directory. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L596-L604 | [
"def __get_destination__(self, destination):\n \"\"\"\n Returns a File or Folder object that would represent this entity\n if it were copied or moved to `destination`.\n \"\"\"\n if isinstance(destination,\n File) or os.path.isfile(unicode(destination)):\n return destination\n... | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
hyde/fswrap | fswrap.py | Folder.rename_to | python | def rename_to(self, destination_name):
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target | Moves this directory to the given destination. Returns a Folder object
that represents the moved directory. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L606-L614 | null | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
hyde/fswrap | fswrap.py | Folder._create_target_tree | python | def _create_target_tree(self, target):
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make() | There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L616-L631 | null | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
hyde/fswrap | fswrap.py | Folder.copy_contents_to | python | def copy_contents_to(self, destination):
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target | Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L633-L643 | [
"def make(self):\n \"\"\"\n Creates this directory and any of the missing directories in the path.\n Any errors that may occur are eaten.\n \"\"\"\n try:\n if not self.exists:\n logger.info(\"Creating %s\" % self.path)\n os.makedirs(self.path)\n except os.error:\n ... | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
Returns a folder object by combining the fragment to this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
staggerpkg/stagger | stagger/frames.py | Frame._from_frame | python | def _from_frame(cls, frame):
"Copy constructor"
assert frame._framespec == cls._framespec
new = cls(flags=frame.flags, frameno=frame.frameno)
for spec in cls._framespec:
setattr(new, spec.name, getattr(frame, spec.name, None))
return new | Copy constructor | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/frames.py#L91-L97 | null | class Frame(metaclass=abc.ABCMeta):
_framespec = tuple()
_version = tuple()
_allow_duplicates = False
def __init__(self, frameid=None, flags=None, frameno=None, **kwargs):
self.frameid = frameid if frameid else type(self).__name__
self.flags = flags if flags else set()
self.frameno = frameno
assert len(self._framespec) > 0
for spec in self._framespec:
val = kwargs.get(spec.name, None)
setattr(self, spec.name, val)
def __setattr__(self, name, value):
# Automatic validation on assignment
for spec in self._framespec:
if name == spec.name:
value = spec.validate(self, value)
break
super().__setattr__(name, value)
def __eq__(self, other):
return (isinstance(other, type(self))
and self.frameid == other.frameid
and self.flags == other.flags
and self._framespec == other._framespec
and all(getattr(self, spec.name, None) ==
getattr(other, spec.name, None)
for spec in self._framespec))
@classmethod
def _decode(cls, frameid, data, flags=None, frameno=None):
frame = cls(frameid=frameid, flags=flags, frameno=frameno)
if getattr(frame, "_untested", False):
warn("{0}: Untested frame; please verify results".format(frameid),
UntestedFrameWarning)
for spec in frame._framespec:
try:
val, data = spec.read(frame, data)
setattr(frame, spec.name, val)
except EOFError:
if not spec._optional:
raise
return frame
@classmethod
@classmethod
def _merge(cls, frames):
if cls._allow_duplicates:
return frames
else:
if len(frames) > 1:
# TODO: Research what iTunes does in this case
# Mutagen displays the first frame only.
warn("{0}: Duplicate frame; only the first instance is kept"
.format(frames[0].frameid),
DuplicateFrameWarning)
return frames[0:1]
@classmethod
def _in_version(self, *versions):
"Returns true if this frame is in any of the specified versions of ID3."
for version in versions:
if (self._version == version
or (isinstance(self._version, collections.Container)
and version in self._version)):
return True
return False
def _to_version(self, version):
if self._in_version(version):
return self
if version == 2 and hasattr(self, "_v2_frame"):
return self._v2_frame._from_frame(self)
if self._in_version(2):
base = type(self).__bases__[0]
if issubclass(base, Frame) and base._in_version(version):
return base._from_frame(self)
raise IncompatibleFrameError("Frame {0} cannot be converted "
"to ID3v2.{1} format".format(self.frameid, version))
def _encode(self, encodings=("latin-1", "utf-16")):
# if getattr(self, "_bozo", False):
# warn("{0}: Frame type is not widely implemented, "
# "its use is discouraged".format(self.frameid),
# BozoFrameWarning)
def encode_fields():
data = bytearray()
for spec in self._framespec:
if spec._optional and getattr(self, spec.name) is None:
break
data.extend(spec.write(self, getattr(self, spec.name)))
return data
def try_preferred_encodings():
orig_encoding = self.encoding
try:
for encoding in encodings:
try:
self.encoding = encoding
return encode_fields()
except UnicodeEncodeError:
pass
finally:
self.encoding = orig_encoding
raise ValueError("Could not encode strings")
if not isinstance(self._framespec[0], EncodingSpec):
return encode_fields()
elif self.encoding is None:
return try_preferred_encodings()
else:
try:
# Try specified encoding before others
return encode_fields()
except UnicodeEncodeError:
return try_preferred_encodings()
def __repr__(self):
stype = type(self).__name__
args = []
if stype != self.frameid:
args.append("frameid={0!r}".format(self.frameid))
if self.flags:
args.append("flags={0!r}".format(self.flags))
for spec in self._framespec:
if isinstance(spec, BinaryDataSpec):
data = getattr(self, spec.name)
if isinstance(data, (bytes, bytearray)):
args.append("{0}=<{1} bytes of binary data {2!r}{3}>".format(
spec.name, len(data),
data[:20], "..." if len(data) > 20 else ""))
else:
args.append(repr(data))
else:
args.append("{0}={1!r}".format(spec.name, getattr(self, spec.name)))
return "{0}({1})".format(stype, ", ".join(args))
def _spec(self, name):
"Return the named spec."
for s in self._framespec:
if s.name == name:
return s
raise ValueError("Unknown spec: " + name)
def _str_fields(self):
fields = []
# Determine how many fields to show
cutoff = max(i for i in range(len(self._framespec))
if i == 0 # don't call max with an the empty sequence
or not self._framespec[i]._optional
or getattr(self, self._framespec[i].name, None) is not None)
for spec in self._framespec[:cutoff + 1]:
fields.append("{0}={1}"
.format(spec.name,
repr(spec.to_str(getattr(self, spec.name, None)))))
return ", ".join(fields)
def __str__(self):
flag = " "
if "unknown" in self.flags: flag = "?"
if isinstance(self, ErrorFrame): flag = "!"
return "{0}{1}({2})".format(flag, self.frameid, self._str_fields())
|
staggerpkg/stagger | stagger/frames.py | Frame._in_version | python | def _in_version(self, *versions):
"Returns true if this frame is in any of the specified versions of ID3."
for version in versions:
if (self._version == version
or (isinstance(self._version, collections.Container)
and version in self._version)):
return True
return False | Returns true if this frame is in any of the specified versions of ID3. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/frames.py#L113-L120 | null | class Frame(metaclass=abc.ABCMeta):
_framespec = tuple()
_version = tuple()
_allow_duplicates = False
def __init__(self, frameid=None, flags=None, frameno=None, **kwargs):
self.frameid = frameid if frameid else type(self).__name__
self.flags = flags if flags else set()
self.frameno = frameno
assert len(self._framespec) > 0
for spec in self._framespec:
val = kwargs.get(spec.name, None)
setattr(self, spec.name, val)
def __setattr__(self, name, value):
# Automatic validation on assignment
for spec in self._framespec:
if name == spec.name:
value = spec.validate(self, value)
break
super().__setattr__(name, value)
def __eq__(self, other):
return (isinstance(other, type(self))
and self.frameid == other.frameid
and self.flags == other.flags
and self._framespec == other._framespec
and all(getattr(self, spec.name, None) ==
getattr(other, spec.name, None)
for spec in self._framespec))
@classmethod
def _decode(cls, frameid, data, flags=None, frameno=None):
frame = cls(frameid=frameid, flags=flags, frameno=frameno)
if getattr(frame, "_untested", False):
warn("{0}: Untested frame; please verify results".format(frameid),
UntestedFrameWarning)
for spec in frame._framespec:
try:
val, data = spec.read(frame, data)
setattr(frame, spec.name, val)
except EOFError:
if not spec._optional:
raise
return frame
@classmethod
def _from_frame(cls, frame):
"Copy constructor"
assert frame._framespec == cls._framespec
new = cls(flags=frame.flags, frameno=frame.frameno)
for spec in cls._framespec:
setattr(new, spec.name, getattr(frame, spec.name, None))
return new
@classmethod
def _merge(cls, frames):
if cls._allow_duplicates:
return frames
else:
if len(frames) > 1:
# TODO: Research what iTunes does in this case
# Mutagen displays the first frame only.
warn("{0}: Duplicate frame; only the first instance is kept"
.format(frames[0].frameid),
DuplicateFrameWarning)
return frames[0:1]
@classmethod
def _to_version(self, version):
if self._in_version(version):
return self
if version == 2 and hasattr(self, "_v2_frame"):
return self._v2_frame._from_frame(self)
if self._in_version(2):
base = type(self).__bases__[0]
if issubclass(base, Frame) and base._in_version(version):
return base._from_frame(self)
raise IncompatibleFrameError("Frame {0} cannot be converted "
"to ID3v2.{1} format".format(self.frameid, version))
def _encode(self, encodings=("latin-1", "utf-16")):
# if getattr(self, "_bozo", False):
# warn("{0}: Frame type is not widely implemented, "
# "its use is discouraged".format(self.frameid),
# BozoFrameWarning)
def encode_fields():
data = bytearray()
for spec in self._framespec:
if spec._optional and getattr(self, spec.name) is None:
break
data.extend(spec.write(self, getattr(self, spec.name)))
return data
def try_preferred_encodings():
orig_encoding = self.encoding
try:
for encoding in encodings:
try:
self.encoding = encoding
return encode_fields()
except UnicodeEncodeError:
pass
finally:
self.encoding = orig_encoding
raise ValueError("Could not encode strings")
if not isinstance(self._framespec[0], EncodingSpec):
return encode_fields()
elif self.encoding is None:
return try_preferred_encodings()
else:
try:
# Try specified encoding before others
return encode_fields()
except UnicodeEncodeError:
return try_preferred_encodings()
def __repr__(self):
stype = type(self).__name__
args = []
if stype != self.frameid:
args.append("frameid={0!r}".format(self.frameid))
if self.flags:
args.append("flags={0!r}".format(self.flags))
for spec in self._framespec:
if isinstance(spec, BinaryDataSpec):
data = getattr(self, spec.name)
if isinstance(data, (bytes, bytearray)):
args.append("{0}=<{1} bytes of binary data {2!r}{3}>".format(
spec.name, len(data),
data[:20], "..." if len(data) > 20 else ""))
else:
args.append(repr(data))
else:
args.append("{0}={1!r}".format(spec.name, getattr(self, spec.name)))
return "{0}({1})".format(stype, ", ".join(args))
def _spec(self, name):
"Return the named spec."
for s in self._framespec:
if s.name == name:
return s
raise ValueError("Unknown spec: " + name)
def _str_fields(self):
fields = []
# Determine how many fields to show
cutoff = max(i for i in range(len(self._framespec))
if i == 0 # don't call max with an the empty sequence
or not self._framespec[i]._optional
or getattr(self, self._framespec[i].name, None) is not None)
for spec in self._framespec[:cutoff + 1]:
fields.append("{0}={1}"
.format(spec.name,
repr(spec.to_str(getattr(self, spec.name, None)))))
return ", ".join(fields)
def __str__(self):
flag = " "
if "unknown" in self.flags: flag = "?"
if isinstance(self, ErrorFrame): flag = "!"
return "{0}{1}({2})".format(flag, self.frameid, self._str_fields())
|
staggerpkg/stagger | stagger/frames.py | Frame._spec | python | def _spec(self, name):
"Return the named spec."
for s in self._framespec:
if s.name == name:
return s
raise ValueError("Unknown spec: " + name) | Return the named spec. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/frames.py#L192-L197 | null | class Frame(metaclass=abc.ABCMeta):
_framespec = tuple()
_version = tuple()
_allow_duplicates = False
def __init__(self, frameid=None, flags=None, frameno=None, **kwargs):
self.frameid = frameid if frameid else type(self).__name__
self.flags = flags if flags else set()
self.frameno = frameno
assert len(self._framespec) > 0
for spec in self._framespec:
val = kwargs.get(spec.name, None)
setattr(self, spec.name, val)
def __setattr__(self, name, value):
# Automatic validation on assignment
for spec in self._framespec:
if name == spec.name:
value = spec.validate(self, value)
break
super().__setattr__(name, value)
def __eq__(self, other):
return (isinstance(other, type(self))
and self.frameid == other.frameid
and self.flags == other.flags
and self._framespec == other._framespec
and all(getattr(self, spec.name, None) ==
getattr(other, spec.name, None)
for spec in self._framespec))
@classmethod
def _decode(cls, frameid, data, flags=None, frameno=None):
frame = cls(frameid=frameid, flags=flags, frameno=frameno)
if getattr(frame, "_untested", False):
warn("{0}: Untested frame; please verify results".format(frameid),
UntestedFrameWarning)
for spec in frame._framespec:
try:
val, data = spec.read(frame, data)
setattr(frame, spec.name, val)
except EOFError:
if not spec._optional:
raise
return frame
@classmethod
def _from_frame(cls, frame):
"Copy constructor"
assert frame._framespec == cls._framespec
new = cls(flags=frame.flags, frameno=frame.frameno)
for spec in cls._framespec:
setattr(new, spec.name, getattr(frame, spec.name, None))
return new
@classmethod
def _merge(cls, frames):
if cls._allow_duplicates:
return frames
else:
if len(frames) > 1:
# TODO: Research what iTunes does in this case
# Mutagen displays the first frame only.
warn("{0}: Duplicate frame; only the first instance is kept"
.format(frames[0].frameid),
DuplicateFrameWarning)
return frames[0:1]
@classmethod
def _in_version(self, *versions):
"Returns true if this frame is in any of the specified versions of ID3."
for version in versions:
if (self._version == version
or (isinstance(self._version, collections.Container)
and version in self._version)):
return True
return False
def _to_version(self, version):
if self._in_version(version):
return self
if version == 2 and hasattr(self, "_v2_frame"):
return self._v2_frame._from_frame(self)
if self._in_version(2):
base = type(self).__bases__[0]
if issubclass(base, Frame) and base._in_version(version):
return base._from_frame(self)
raise IncompatibleFrameError("Frame {0} cannot be converted "
"to ID3v2.{1} format".format(self.frameid, version))
def _encode(self, encodings=("latin-1", "utf-16")):
# if getattr(self, "_bozo", False):
# warn("{0}: Frame type is not widely implemented, "
# "its use is discouraged".format(self.frameid),
# BozoFrameWarning)
def encode_fields():
data = bytearray()
for spec in self._framespec:
if spec._optional and getattr(self, spec.name) is None:
break
data.extend(spec.write(self, getattr(self, spec.name)))
return data
def try_preferred_encodings():
orig_encoding = self.encoding
try:
for encoding in encodings:
try:
self.encoding = encoding
return encode_fields()
except UnicodeEncodeError:
pass
finally:
self.encoding = orig_encoding
raise ValueError("Could not encode strings")
if not isinstance(self._framespec[0], EncodingSpec):
return encode_fields()
elif self.encoding is None:
return try_preferred_encodings()
else:
try:
# Try specified encoding before others
return encode_fields()
except UnicodeEncodeError:
return try_preferred_encodings()
def __repr__(self):
stype = type(self).__name__
args = []
if stype != self.frameid:
args.append("frameid={0!r}".format(self.frameid))
if self.flags:
args.append("flags={0!r}".format(self.flags))
for spec in self._framespec:
if isinstance(spec, BinaryDataSpec):
data = getattr(self, spec.name)
if isinstance(data, (bytes, bytearray)):
args.append("{0}=<{1} bytes of binary data {2!r}{3}>".format(
spec.name, len(data),
data[:20], "..." if len(data) > 20 else ""))
else:
args.append(repr(data))
else:
args.append("{0}={1!r}".format(spec.name, getattr(self, spec.name)))
return "{0}({1})".format(stype, ", ".join(args))
def _str_fields(self):
fields = []
# Determine how many fields to show
cutoff = max(i for i in range(len(self._framespec))
if i == 0 # don't call max with an the empty sequence
or not self._framespec[i]._optional
or getattr(self, self._framespec[i].name, None) is not None)
for spec in self._framespec[:cutoff + 1]:
fields.append("{0}={1}"
.format(spec.name,
repr(spec.to_str(getattr(self, spec.name, None)))))
return ", ".join(fields)
def __str__(self):
flag = " "
if "unknown" in self.flags: flag = "?"
if isinstance(self, ErrorFrame): flag = "!"
return "{0}{1}({2})".format(flag, self.frameid, self._str_fields())
|
staggerpkg/stagger | stagger/util.py | check_tag_data | python | def check_tag_data(data):
"Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag."
if len(data) < 10:
raise ValueError("Tag too short")
if data[0:3] != b"ID3":
raise ValueError("Missing ID3 identifier")
if data[3] >= 5 or data[4] != 0:
raise ValueError("Unknown ID3 version")
length = stagger.conversion.Syncsafe.decode(data[6:10]) + 10
if len(data) != length:
raise ValueError("Tag size mismatch") | Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/util.py#L52-L62 | [
"def decode(data):\n \"Decodes a syncsafe integer\"\n value = 0\n for b in data:\n if b > 127: # iTunes bug\n raise ValueError(\"Invalid syncsafe integer\")\n value <<= 7\n value += b\n return value\n"
] | #
# util.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import sys
from contextlib import contextmanager
import stagger
def python_version_check():
if sys.version_info[0:3] == (3, 1, 0):
print("There are data corruption issues with Python 3.1.0's io module; \n"
"please upgrade Python to at least 3.1.1 in order for Stagger \n"
"to work reliably.\n\n"
"For more information, see http://bugs.python.org/issue6629.",
file=sys.stderr)
exit(2)
def verb(verbose, *args, **kwargs):
if verbose:
print(*args, **kwargs)
def get_raw_tag_data(filename):
"Return the ID3 tag in FILENAME as a raw byte string."
with open(filename, "rb") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
return bytes()
file.seek(offset)
return file.read(length)
def set_raw_tag_data(filename, data, act=True, verbose=False):
"Replace the ID3 tag in FILENAME with DATA."
check_tag_data(data)
with open(filename, "rb+") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
(offset, length) = (0, 0)
if length > 0:
verb(verbose, "{0}: replaced tag with {1} bytes of data"
.format(filename, len(data)))
else:
verb(verbose, "{0}: created tag with {1} bytes of data"
.format(filename, len(data)))
if act:
stagger.fileutil.replace_chunk(file, offset, length, data)
def set_frames(filename, valuedict, act=True, verbose=False):
try:
tag = stagger.read_tag(filename)
except stagger.NoTagError:
verb(verbose, "{0}: new ID3v2.{1} tag"
.format(filename, stagger.default_tag.version))
tag = stagger.default_tag()
for (key, value) in valuedict.items():
if key.lower() in tag._friendly_names:
# Use friendly name API
key = key.lower().replace("-", "_")
assert hasattr(tag, key)
setattr(tag, key, value)
newval = repr(getattr(tag, key))
else:
# Use frameid API
tag[key] = value
newval = tag[key]
verb(verbose, "{0}: {1}: set to {2}".format(filename, key, newval))
if act:
tag.write(filename)
def remove_frames(filename, frameids, act=True, verbose=False):
try:
tag = stagger.read_tag(filename)
except stagger.NoTagError:
verb(verbose, "{0}: no ID3 tag".format(filename))
return
for frameid in frameids:
try:
del tag[frameid]
verb(verbose, "{0}: {1}: deleted".format(filename, frameid))
except KeyError:
verb(verbose, "{0}: {1}: not in file".format(filename, frameid))
if act:
tag.write(filename)
@contextmanager
def print_warnings(filename, options):
with warnings.catch_warnings(record=True) as ws:
try:
yield None
finally:
if not options.quiet and len(ws) > 0:
for w in ws:
print(filename + ":warning: " + str(w.message),
file=sys.stderr)
sys.stderr.flush()
|
staggerpkg/stagger | stagger/util.py | get_raw_tag_data | python | def get_raw_tag_data(filename):
"Return the ID3 tag in FILENAME as a raw byte string."
with open(filename, "rb") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
return bytes()
file.seek(offset)
return file.read(length) | Return the ID3 tag in FILENAME as a raw byte string. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/util.py#L64-L72 | [
"def detect_tag(filename):\n \"\"\"Return type and position of ID3v2 tag in filename.\n Returns (tag_class, offset, length), where tag_class\n is either Tag22, Tag23, or Tag24, and (offset, length)\n is the position of the tag in the file.\n \"\"\"\n with fileutil.opened(filename, \"rb\") as file:... | #
# util.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import sys
from contextlib import contextmanager
import stagger
def python_version_check():
if sys.version_info[0:3] == (3, 1, 0):
print("There are data corruption issues with Python 3.1.0's io module; \n"
"please upgrade Python to at least 3.1.1 in order for Stagger \n"
"to work reliably.\n\n"
"For more information, see http://bugs.python.org/issue6629.",
file=sys.stderr)
exit(2)
def verb(verbose, *args, **kwargs):
if verbose:
print(*args, **kwargs)
def check_tag_data(data):
"Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag."
if len(data) < 10:
raise ValueError("Tag too short")
if data[0:3] != b"ID3":
raise ValueError("Missing ID3 identifier")
if data[3] >= 5 or data[4] != 0:
raise ValueError("Unknown ID3 version")
length = stagger.conversion.Syncsafe.decode(data[6:10]) + 10
if len(data) != length:
raise ValueError("Tag size mismatch")
def set_raw_tag_data(filename, data, act=True, verbose=False):
"Replace the ID3 tag in FILENAME with DATA."
check_tag_data(data)
with open(filename, "rb+") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
(offset, length) = (0, 0)
if length > 0:
verb(verbose, "{0}: replaced tag with {1} bytes of data"
.format(filename, len(data)))
else:
verb(verbose, "{0}: created tag with {1} bytes of data"
.format(filename, len(data)))
if act:
stagger.fileutil.replace_chunk(file, offset, length, data)
def set_frames(filename, valuedict, act=True, verbose=False):
try:
tag = stagger.read_tag(filename)
except stagger.NoTagError:
verb(verbose, "{0}: new ID3v2.{1} tag"
.format(filename, stagger.default_tag.version))
tag = stagger.default_tag()
for (key, value) in valuedict.items():
if key.lower() in tag._friendly_names:
# Use friendly name API
key = key.lower().replace("-", "_")
assert hasattr(tag, key)
setattr(tag, key, value)
newval = repr(getattr(tag, key))
else:
# Use frameid API
tag[key] = value
newval = tag[key]
verb(verbose, "{0}: {1}: set to {2}".format(filename, key, newval))
if act:
tag.write(filename)
def remove_frames(filename, frameids, act=True, verbose=False):
try:
tag = stagger.read_tag(filename)
except stagger.NoTagError:
verb(verbose, "{0}: no ID3 tag".format(filename))
return
for frameid in frameids:
try:
del tag[frameid]
verb(verbose, "{0}: {1}: deleted".format(filename, frameid))
except KeyError:
verb(verbose, "{0}: {1}: not in file".format(filename, frameid))
if act:
tag.write(filename)
@contextmanager
def print_warnings(filename, options):
with warnings.catch_warnings(record=True) as ws:
try:
yield None
finally:
if not options.quiet and len(ws) > 0:
for w in ws:
print(filename + ":warning: " + str(w.message),
file=sys.stderr)
sys.stderr.flush()
|
staggerpkg/stagger | stagger/util.py | set_raw_tag_data | python | def set_raw_tag_data(filename, data, act=True, verbose=False):
"Replace the ID3 tag in FILENAME with DATA."
check_tag_data(data)
with open(filename, "rb+") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
(offset, length) = (0, 0)
if length > 0:
verb(verbose, "{0}: replaced tag with {1} bytes of data"
.format(filename, len(data)))
else:
verb(verbose, "{0}: created tag with {1} bytes of data"
.format(filename, len(data)))
if act:
stagger.fileutil.replace_chunk(file, offset, length, data) | Replace the ID3 tag in FILENAME with DATA. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/util.py#L74-L89 | [
"def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5):\n \"\"\"Replace length bytes of data with chunk, starting at offset.\n Any KeyboardInterrupts arriving while replace_chunk is runnning\n are deferred until the operation is complete.\n\n If in_place is true, the operation wor... | #
# util.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import sys
from contextlib import contextmanager
import stagger
def python_version_check():
if sys.version_info[0:3] == (3, 1, 0):
print("There are data corruption issues with Python 3.1.0's io module; \n"
"please upgrade Python to at least 3.1.1 in order for Stagger \n"
"to work reliably.\n\n"
"For more information, see http://bugs.python.org/issue6629.",
file=sys.stderr)
exit(2)
def verb(verbose, *args, **kwargs):
if verbose:
print(*args, **kwargs)
def check_tag_data(data):
"Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag."
if len(data) < 10:
raise ValueError("Tag too short")
if data[0:3] != b"ID3":
raise ValueError("Missing ID3 identifier")
if data[3] >= 5 or data[4] != 0:
raise ValueError("Unknown ID3 version")
length = stagger.conversion.Syncsafe.decode(data[6:10]) + 10
if len(data) != length:
raise ValueError("Tag size mismatch")
def get_raw_tag_data(filename):
"Return the ID3 tag in FILENAME as a raw byte string."
with open(filename, "rb") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
return bytes()
file.seek(offset)
return file.read(length)
def set_frames(filename, valuedict, act=True, verbose=False):
try:
tag = stagger.read_tag(filename)
except stagger.NoTagError:
verb(verbose, "{0}: new ID3v2.{1} tag"
.format(filename, stagger.default_tag.version))
tag = stagger.default_tag()
for (key, value) in valuedict.items():
if key.lower() in tag._friendly_names:
# Use friendly name API
key = key.lower().replace("-", "_")
assert hasattr(tag, key)
setattr(tag, key, value)
newval = repr(getattr(tag, key))
else:
# Use frameid API
tag[key] = value
newval = tag[key]
verb(verbose, "{0}: {1}: set to {2}".format(filename, key, newval))
if act:
tag.write(filename)
def remove_frames(filename, frameids, act=True, verbose=False):
try:
tag = stagger.read_tag(filename)
except stagger.NoTagError:
verb(verbose, "{0}: no ID3 tag".format(filename))
return
for frameid in frameids:
try:
del tag[frameid]
verb(verbose, "{0}: {1}: deleted".format(filename, frameid))
except KeyError:
verb(verbose, "{0}: {1}: not in file".format(filename, frameid))
if act:
tag.write(filename)
@contextmanager
def print_warnings(filename, options):
with warnings.catch_warnings(record=True) as ws:
try:
yield None
finally:
if not options.quiet and len(ws) > 0:
for w in ws:
print(filename + ":warning: " + str(w.message),
file=sys.stderr)
sys.stderr.flush()
|
staggerpkg/stagger | stagger/specs.py | SequenceSpec.read | python | def read(self, frame, data):
"Returns a list of values, eats all of data."
seq = []
while data:
elem, data = self.spec.read(frame, data)
seq.append(elem)
return seq, data | Returns a list of values, eats all of data. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/specs.py#L367-L373 | [
"def read(self, frame, data):\n rawstr, sep, data = data.partition(b\"\\x00\")\n return rawstr.decode('latin-1'), data\n"
] | class SequenceSpec(Spec):
"""Recognizes a sequence of values, all of the same spec."""
def __init__(self, name, spec):
super().__init__(name)
self.spec = spec
def write(self, frame, values):
if isinstance(values, str):
return self.spec.write(frame, values)
data = bytearray()
for v in values:
data.extend(self.spec.write(frame, v))
return data
def validate(self, frame, values):
if values is None:
return []
if isinstance(values, str):
values = [values]
return [self.spec.validate(frame, v) for v in values]
|
class Unsync:
    "Conversion from/to unsynchronized byte sequences."

    @staticmethod
    def gen_decode(iterable):
        "A generator for de-unsynchronizing a byte iterable."
        prev_ff = False
        for byte in iterable:
            if prev_ff and byte & 0xE0:
                warn("Invalid unsynched data", Warning)
            if not (prev_ff and byte == 0x00):
                yield byte
            prev_ff = (byte == 0xFF)

    @staticmethod
    def gen_encode(data):
        "A generator for unsynchronizing a byte iterable."
        prev_ff = False
        for byte in data:
            if prev_ff and (byte == 0x00 or byte & 0xE0):
                yield 0x00  # Insert sync char
            yield byte
            prev_ff = (byte == 0xFF)
        if prev_ff:
            yield 0x00  # Data ends on 0xFF

    @staticmethod
    def decode(data):
        "Remove unsynchronization bytes from data."
        return bytes(Unsync.gen_decode(data))

    @staticmethod
    def encode(data):
        "Insert unsynchronization bytes into data."
        return bytes(Unsync.gen_encode(data))
|
class Unsync:
    "Helpers for ID3v2 unsynchronization of byte streams."

    @staticmethod
    def gen_decode(iterable):
        "Yield the bytes of iterable with unsynchronization undone."
        after_ff = False
        for value in iterable:
            if after_ff and value & 0xE0:
                warn("Invalid unsynched data", Warning)
            stuffed = after_ff and value == 0x00
            if not stuffed:
                yield value
            after_ff = (value == 0xFF)

    @staticmethod
    def gen_encode(data):
        "Yield the bytes of data with unsynchronization applied."
        after_ff = False
        for value in data:
            if after_ff and (value == 0x00 or value & 0xE0):
                # Stuff a zero byte after every 0xFF that would otherwise
                # look like a false sync.
                yield 0x00
            yield value
            after_ff = (value == 0xFF)
        if after_ff:
            # A trailing 0xFF also needs a stuffed zero.
            yield 0x00

    @staticmethod
    def decode(data):
        "Return data with unsynchronization bytes removed."
        return bytes(Unsync.gen_decode(data))

    @staticmethod
    def encode(data):
        "Return data with unsynchronization bytes inserted."
        return bytes(Unsync.gen_encode(data))
|
class Syncsafe:
    """Conversion to/from syncsafe integers.

    Syncsafe integers are big-endian 7-bit byte sequences.
    """

    @staticmethod
    def decode(data):
        "Decodes a syncsafe integer"
        value = 0
        for byte in data:
            if byte > 127:  # iTunes bug
                raise ValueError("Invalid syncsafe integer")
            value = (value << 7) + byte
        return value

    @staticmethod
    def encode(i, *, width=-1):
        """Encodes a nonnegative integer into syncsafe format

        When width > 0, then len(result) == width
        When width < 0, then len(result) >= abs(width)
        """
        if i < 0:
            raise ValueError("value is negative")
        assert width != 0
        digits = bytearray()
        while i:
            digits.append(i & 127)
            i >>= 7
        if width > 0 and len(digits) > width:
            raise ValueError("Integer too large")
        if len(digits) < abs(width):
            digits.extend([0] * (abs(width) - len(digits)))
        digits.reverse()
        return digits
class Syncsafe:
    """Convert between Python ints and syncsafe integers.

    A syncsafe integer is stored big-endian, seven bits per byte,
    with the high bit of every byte left clear.
    """

    @staticmethod
    def decode(data):
        "Decode a syncsafe byte sequence into a nonnegative integer."
        result = 0
        for octet in data:
            if octet > 127:  # iTunes emits such bytes; reject them
                raise ValueError("Invalid syncsafe integer")
            result <<= 7
            result += octet
        return result

    @staticmethod
    def encode(i, *, width=-1):
        """Encode the nonnegative integer i as a syncsafe bytearray.

        When width > 0, then len(result) == width
        When width < 0, then len(result) >= abs(width)
        """
        if i < 0:
            raise ValueError("value is negative")
        assert width != 0
        out = bytearray()
        remaining = i
        while remaining:
            out.append(remaining & 127)
            remaining >>= 7
        if 0 < width < len(out):
            raise ValueError("Integer too large")
        shortfall = abs(width) - len(out)
        if shortfall > 0:
            out.extend([0] * shortfall)
        out.reverse()
        return out
def detect_tag(filename):
    """Return type and position of ID3v2 tag in filename.

    Returns (tag_class, offset, length), where tag_class
    is either Tag22, Tag23, or Tag24, and (offset, length)
    is the position of the tag in the file.
    """
    with fileutil.opened(filename, "rb") as file:
        file.seek(0)
        header = file.read(10)
        file.seek(0)
        if len(header) < 10:
            raise NoTagError("File too short")
        if not header.startswith(b"ID3"):
            raise NoTagError("ID3v2 tag not found")
        major, minor = header[3], header[4]
        if major not in _tag_versions or minor != 0:
            raise TagError("Unknown ID3 version: 2.{0}.{1}"
                           .format(major, minor))
        cls = _tag_versions[major]
        offset = 0
        # The size field excludes the 10-byte header (and footer, if any).
        length = Syncsafe.decode(header[6:10]) + 10
        if major == 4 and header[5] & _TAG24_FOOTER:
            length += 10
        return (cls, offset, length)
is the position of the tag in the file. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/tags.py#L95-L117 | null | #
# tags.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import abc
import struct
import re
import collections
import io
import imghdr
import zlib
from abc import abstractmethod, abstractproperty
from warnings import warn
from contextlib import contextmanager
from stagger.errors import *
from stagger.conversion import *
import stagger.frames as Frames
import stagger.fileutil as fileutil
# ID3v2.3 frame header flag bits (16-bit flag field after the frame size).
_FRAME23_FORMAT_COMPRESSED = 0x0080
_FRAME23_FORMAT_ENCRYPTED = 0x0040
_FRAME23_FORMAT_GROUP = 0x0020
_FRAME23_FORMAT_UNKNOWN_MASK = 0x001F
_FRAME23_STATUS_DISCARD_ON_TAG_ALTER = 0x8000
_FRAME23_STATUS_DISCARD_ON_FILE_ALTER = 0x4000
_FRAME23_STATUS_READ_ONLY = 0x2000
_FRAME23_STATUS_UNKNOWN_MASK = 0x1F00

# ID3v2.4 tag header flag bits (single flags byte in the tag header).
_TAG24_UNSYNCHRONISED = 0x80
_TAG24_EXTENDED_HEADER = 0x40
_TAG24_EXPERIMENTAL = 0x20
_TAG24_FOOTER = 0x10
_TAG24_UNKNOWN_MASK = 0x0F

# ID3v2.4 frame header flag bits (note: different bit layout than v2.3).
_FRAME24_FORMAT_GROUP = 0x0040
_FRAME24_FORMAT_COMPRESSED = 0x0008
_FRAME24_FORMAT_ENCRYPTED = 0x0004
_FRAME24_FORMAT_UNSYNCHRONISED = 0x0002
_FRAME24_FORMAT_DATA_LENGTH_INDICATOR = 0x0001
_FRAME24_FORMAT_UNKNOWN_MASK = 0x00B0
_FRAME24_STATUS_DISCARD_ON_TAG_ALTER = 0x4000
_FRAME24_STATUS_DISCARD_ON_FILE_ALTER = 0x2000
_FRAME24_STATUS_READ_ONLY = 0x1000
_FRAME24_STATUS_UNKNOWN_MASK = 0x8F00
def read_tag(filename):
    """Detect and read the ID3v2 tag stored in filename."""
    with fileutil.opened(filename, "rb") as file:
        tag_class, offset, _length = detect_tag(file)
        return tag_class.read(file, offset)
def decode_tag(data):
    """Parse an ID3v2 tag out of an in-memory byte string."""
    buffer = io.BytesIO(data)
    return read_tag(buffer)
def delete_tag(filename):
    """Remove any ID3v2 tag from filename; a missing tag is not an error."""
    with fileutil.opened(filename, "rb+") as file:
        try:
            cls, offset, length = detect_tag(file)
            fileutil.replace_chunk(file, offset, length, b"")
        except NoTagError:
            # Nothing to delete; best-effort semantics.
            pass
def frameclass(cls):
    """Register cls as a class representing an ID3 frame.

    Sets cls.frameid and cls._version if not present, and registers the
    new frame in Tag's known_frames dictionary.

    To be used as a decorator on the class definition:

    @frameclass
    class UFID(Frame):
        _framespec = (NullTerminatedStringSpec("owner"), BinaryDataSpec("data"))
    """
    assert issubclass(cls, Frames.Frame)

    # Register v2.2 versions of v2.3/v2.4 frames if encoded by inheritance.
    # A three-letter class name that subclasses a four-letter v2.3/2.4 frame
    # becomes that frame's v2.2 counterpart.
    if len(cls.__name__) == 3:
        base = cls.__bases__[0]
        if issubclass(base, Frames.Frame) and base._in_version(3, 4):
            assert not hasattr(base, "_v2_frame")
            base._v2_frame = cls
            # Override frameid from base with v2.2 name
            if base.frameid == cls.frameid:
                cls.frameid = cls.__name__

    # Add frameid.
    if not hasattr(cls, "frameid"):
        cls.frameid = cls.__name__
    assert Tag._is_frame_id(cls.frameid)

    # Supply _version attribute if missing.
    if len(cls.frameid) == 3:
        cls._version = 2
    if len(cls.frameid) == 4 and not cls._version:
        # NOTE(review): relies on the Frame base class providing a falsy
        # default _version — confirm in stagger.frames.
        cls._version = (3, 4)

    # Register cls as a known frame.
    assert cls.frameid not in Tag.known_frames
    Tag.known_frames[cls.frameid] = cls

    return cls
class FrameOrder:
    """Order frames based on their position in a predefined list of patterns,
    and their original position in the source tag.

    A pattern may be a frame class, or a regular expression that is to be
    matched against the frame id.

    >>> order = FrameOrder(TIT1, "T.*", TXXX)
    >>> order.key(TIT1())
    (0, 1)
    >>> order.key(TPE1())
    (1, 1)
    >>> order.key(TXXX())
    (2, 1)
    >>> order.key(APIC())
    (3, 1)
    >>> order.key(APIC(frameno=3))
    (3, 0, 3)
    """

    def __init__(self, *patterns):
        self.re_keys = []
        self.frame_keys = dict()
        index = -1
        for index, pattern in enumerate(patterns):
            if isinstance(pattern, str):
                self.re_keys.append((pattern, index))
            else:
                assert issubclass(pattern, Frames.Frame)
                self.frame_keys[pattern] = index
        # Frames matching no pattern sort after everything else.
        self.unknown_key = index + 1

    def key(self, frame):
        "Return the sort key for the given frame."
        def keytuple(primary):
            # Frames with a recorded original position sort before those
            # without, and among themselves by that position.
            if frame.frameno is None:
                return (primary, 1)
            return (primary, 0, frame.frameno)

        # Exact class match takes precedence.
        if type(frame) in self.frame_keys:
            return keytuple(self.frame_keys[type(frame)])
        # A v2.2 frame sorts where its v2.3/v2.4 parent class would.
        if frame._in_version(2):
            parent = type(frame).__bases__[0]
            if parent in self.frame_keys:
                return keytuple(self.frame_keys[parent])
        # Fall back to the regular-expression patterns, in order.
        for pattern, value in self.re_keys:
            if re.match(pattern, frame.frameid):
                return keytuple(value)
        return keytuple(self.unknown_key)

    def __repr__(self):
        entries = [(repr(pat), idx) for (pat, idx) in self.re_keys]
        entries.extend((frame_cls.__name__, self.frame_keys[frame_cls])
                       for frame_cls in self.frame_keys)
        entries.sort(key=lambda entry: entry[1])
        return "<FrameOrder: {0}>".format(", ".join(e[0] for e in entries))
# NOTE(review): collections.MutableMapping moved to collections.abc and was
# removed from the collections namespace in Python 3.10 — update when
# targeting modern Python.
class Tag(collections.MutableMapping, metaclass=abc.ABCMeta):
    """Abstract base class for ID3v2 tags (subclassed per tag version).

    Behaves as a mutable mapping from frame ids to frames.
    """
    known_frames = { } # Maps known frameids to Frame class objects
    frame_order = None # Initialized by stagger.id3

    def __init__(self):
        self.flags = set()          # symbolic tag-level flags ("unsynchronised", ...)
        self._frames = dict()       # frameid -> list of Frame instances
        self._filename = None       # file the tag was read from, if any

    # Primary accessor (no magic)
    def frames(self, key=None, orig_order=False):
        """Returns a list of frames in this tag.

        If KEY is None, returns all frames in the tag; otherwise returns all frames
        whose frameid matches KEY.

        If ORIG_ORDER is True, then the frames are returned in their original order.
        Otherwise the frames are sorted in canonical order according to the frame_order
        field of this tag.
        """
        if key is not None:
            # If there are multiple frames, then they are already in original order.
            key = self._normalize_key(key)
            if len(self._frames[key]) == 0:
                raise KeyError("Key not found: " + repr(key))
            return self._frames[key]
        frames = []
        for frameid in self._frames.keys():
            for frame in self._frames[frameid]:
                frames.append(frame)
        if orig_order:
            # Frames that recorded an original position sort first, by that
            # position; frames without one sort after them.
            key = (lambda frame:
                   (0, frame.frameno)
                   if frame.frameno is not None
                   else (1,))
        else:
            key = self.frame_order.key
        frames.sort(key=key)
        return frames
    # MutableMapping API
    def __iter__(self):
        # Iterates over frame ids, not individual frames.
        for frameid in self._frames:
            yield frameid

    def __len__(self):
        # Total number of frames, counting duplicates.
        return sum(len(self._frames[l]) for l in self._frames)

    def __eq__(self, other):
        # NOTE(review): __eq__ is defined without a matching __hash__;
        # confirm instances are not used as dict keys / set members.
        return (self.version == other.version
                and self.flags == other.flags
                and self._frames == other._frames)

    def _normalize_key(self, key, unknown_ok=True):
        """Return the normalized version of KEY.

        KEY may be a frameid (a string), or a Frame class object.
        If KEY corresponds to a registered frameid, then that frameid is returned.
        Otherwise, either KeyError is raised, or KEY is returned verbatim,
        depending on the value of UNKNOWN_OK.
        """
        if Frames.is_frame_class(key):
            key = key.frameid
        if isinstance(key, str):
            if not self._is_frame_id(key):
                raise KeyError("{0}: Invalid frame id".format(key))
            if key not in self.known_frames:
                if unknown_ok:
                    warn("{0}: Unknown frame id".format(key), UnknownFrameWarning)
                else:
                    raise KeyError("{0}: Unknown frame id".format(key))
        return key

    # Mapping accessor (with extra magic, for convenience)
    def __getitem__(self, key):
        key = self._normalize_key(key)
        fs = self.frames(key)
        allow_duplicates = (key not in self.known_frames
                            or self.known_frames[key]._allow_duplicates)
        if allow_duplicates:
            # Duplicate-friendly frames are always returned as a list.
            return fs
        if len(fs) > 1:
            # Merge duplicates into one ephemeral frame, and return that.
            # This may break users' expectations when they try to make changes
            # to the attributes of the returned frame; however, I think
            # sometimes returning a list, sometimes a single frame for the same
            # frame id would be even worse.
            fs = fs[0]._merge(fs)
        assert len(fs) == 1
        return fs[0]

    def __setitem__(self, key, value):
        key = self._normalize_key(key, unknown_ok=False)
        if isinstance(value, self.known_frames[key]):
            # Already a frame instance of the right type; store as-is.
            self._frames[key] = [value]
            return
        if self.known_frames[key]._allow_duplicates:
            # NOTE(review): collections.Iterable lives in collections.abc on
            # modern Python — same caveat as the class statement above.
            if not isinstance(value, collections.Iterable) or isinstance(value, str):
                raise ValueError("{0} requires a list of frame values".format(key))
            self._frames[key] = [val if isinstance(val, self.known_frames[key])
                                 else self.known_frames[key](val)
                                 for val in value]
        else: # not _allow_duplicates
            self._frames[key] = [self.known_frames[key](value)]

    def __delitem__(self, key):
        del self._frames[self._normalize_key(key)]

    def values(self):
        # Yields every frame individually (overrides the Mapping default,
        # which would yield the per-key lists).
        for frameid in self._frames.keys():
            for frame in self._frames[frameid]:
                yield frame
# Friendly names API
_friendly_names = [ "title", "artist",
"date",
"album-artist", "album",
"track", "track-total",
"disc", "disc-total",
"grouping", "composer",
"genre",
"comment",
#"compilation",
"picture",
"sort-title", "sort-artist",
"sort-album-artist", "sort-album",
"sort-composer",
]
title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
date = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
genre = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
comment = abstractproperty(fget=lambda self: Non, fset=lambda self, value: None)
grouping = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
picture = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
    def __friendly_text_collect(self, frameid):
        """Collect text values from all instances of FRAMEID into a single list.

        Returns an empty list if there are no instances of FRAMEID with a text attribute.
        """
        try:
            return self[frameid].text
        except (KeyError, AttributeError):
            return []

    @classmethod
    def _friendly_text_frame(cls, frameid):
        # Factory for the (getter, setter) pair backing a friendly text
        # property (title, artist, ...); joins/splits values on " / ".
        def getter(self):
            return " / ".join(self.__friendly_text_collect(frameid))
        def setter(self, value):
            if isinstance(value, str):
                if len(value):
                    # For non-empty strings, split value
                    self[frameid] = value.split(" / ")
                elif frameid in self:
                    # For empty strings, delete frame
                    del self[frameid]
            else:
                self[frameid] = value
        return (getter, setter)

    @classmethod
    def _friendly_track(cls, frameid, totalattr):
        # Factory for the track/disc number properties; the frame stores
        # the "N/M" (number/total) convention of TRCK/TPOS-style frames.
        def getter(self):
            ts = self.__friendly_text_collect(frameid)
            try:
                return int(ts[0].partition("/")[0])
            except (ValueError, IndexError):
                # Missing or unparsable -> 0.
                return 0
        def setter(self, value):
            value = int(value)
            total = getattr(self, totalattr)
            if total > 0:
                self[frameid] = "{0}/{1}".format(value, total)
            elif value:
                self[frameid] = str(value)
            elif frameid in self:
                # Setting 0 with no total removes the frame.
                del self[frameid]
        return (getter, setter)

    @classmethod
    def _friendly_track_total(cls, frameid, trackattr):
        # Companion factory for the "total" half of an "N/M" track field.
        def getter(self):
            ts = self.__friendly_text_collect(frameid)
            try:
                return int(ts[0].partition("/")[2])
            except (ValueError, IndexError):
                return 0
        def setter(self, value):
            value = int(value)
            track = getattr(self, trackattr)
            if value:
                self[frameid] = "{0}/{1}".format(track, value)
            elif track:
                self[frameid] = str(track)
            elif frameid in self:
                del self[frameid]
        return (getter, setter)

    # Lenient "YYYY-MM-DD HH:MM:SS" matcher; every component is optional.
    __date_pattern = re.compile(r"""(?x)\s*
                 ((?P<year>[0-9]{4}) # YYYY
                 (-(?P<month>[01][0-9]) # -MM
                 (-(?P<day>[0-3][0-9]) # -DD
                 )?)?)?
                 [ T]?
                 ((?P<hour>[0-2][0-9]) # HH
                 (:(?P<min>[0-6][0-9]) # :MM
                 (:(?P<sec>[0-6][0-9]) # :SS
                 )?)?)?\s*
                 """)

    @classmethod
    def _validate_friendly_date(cls, string):
        # Raise ValueError unless the entire string parses as a date.
        m = cls.__date_pattern.match(string)
        if m is None or m.end() != len(string):
            raise ValueError("date must be in 'YYYY-MM-DD HH:MM:SS' format")

    @classmethod
    def _get_friendly_date(cls, string):
        # Returns [year, month, day, hour, min, sec]; unmatched fields are None.
        m = cls.__date_pattern.match(string)
        if m is None:
            return (None, None, None, None, None, None)
        res = []
        for field in ("year", "month", "day", "hour", "min", "sec"):
            v = m.group(field)
            res.append(int(v) if v is not None else None)
        return res
    def _get_date(self, yearframe, dateframe, timeframe):
        # Assemble (year, month, day, hour, minute, second) from the three
        # separate year/date/time frames; missing components are None.
        year = month = day = hour = minute = second = None
        # Parse year.
        try:
            year = int(self.__friendly_text_collect(yearframe)[0])
        except (IndexError, ValueError):
            pass
        # Parse month and date.
        try:
            date = self.__friendly_text_collect(dateframe)[0]
            m = re.match(r"\s*(?P<month>[01][0-9])\s*-?\s*(?P<day>[0-3][0-9])?\s*$",
                         date)
            if m is not None:
                month = int(m.group("month"))
                # NOTE(review): the day group is optional in the regex, so
                # m.group("day") can be None here, making int() raise
                # TypeError (uncaught) — confirm whether month-only input
                # can actually occur.
                day = int(m.group("day"))
        except IndexError:
            pass
        # Parse time.
        try:
            time = self.__friendly_text_collect(timeframe)[0]
            m = re.match(r"\s*(?P<hour>[0-2][0-9])\s*:?\s*"
                         "(?P<minute>[0-5][0-9])\s*:?\s*"
                         "(?P<second>[0-5][0-9])?\s*$", time)
            if m is not None:
                hour = int(m.group("hour"))
                minute = int(m.group("minute"))
                s = m.group("second")
                second = int(s) if s is not None else None
        except IndexError:
            pass
        return (year, month, day, hour, minute, second)

    def _friendly_date_string(self, *fields):
        # Format a (year, month, day, hour, min, sec) prefix into
        # "YYYY-MM-DD HH:MM:SS", stopping at the first missing field.
        seps = ("", "-", "-", " ", ":", ":")
        formats = ("04", "02", "02", "02", "02", "02")
        res = []
        for i in range(len(fields)):
            if fields[i] is None:
                break
            res.append(seps[i])
            res.append("{0:{1}}".format(fields[i], formats[i]))
        return "".join(res)

    @classmethod
    def _friendly_picture(cls, frameid):
        # Factory for the "picture" property: a human-readable summary
        # string on get; a picture value on set (empty deletes the frame).
        def getter(self):
            if frameid not in self:
                return ""
            else:
                return ", ".join("{0}:{1}:<{2} bytes of {3} data>"
                                 .format(f._spec("type").to_str(f.type),
                                         f.desc,
                                         len(f.data),
                                         imghdr.what(None, f.data[:32]))
                                 for f in self[frameid])
        def setter(self, value):
            if len(value) > 0:
                self[frameid] = [self.known_frames[frameid](value=value)]
            elif frameid in self:
                del self[frameid]
        return (getter, setter)

    @classmethod
    def _friendly_comment(cls, frameid):
        # Factory for the "comment" property, backed by COMM/COM frames.
        def comment_frame_index(self):
            if frameid not in self:
                return None
            # Return comment with lang="eng", desc="", if present.
            # Otherwise return the first comment with no description,
            # regardless of language.
            icmt = None
            for i in range(len(self[frameid])):
                f = self[frameid][i]
                if f.desc == "":
                    if f.lang == "eng":
                        return i
                    if icmt is None:
                        icmt = i
            return icmt
        def getter(self):
            i = comment_frame_index(self)
            if i is None:
                return ""
            else:
                return self[frameid][i].text
        def setter(self, value):
            assert isinstance(value, str)
            # Replace the currently-selected comment frame (if any).
            i = comment_frame_index(self)
            if i is not None:
                del self._frames[frameid][i]
            if len(value) > 0:
                frame = self.known_frames[frameid](lang="eng", desc="", text=value)
                if frameid not in self._frames:
                    self._frames[frameid] = []
                self._frames[frameid].append(frame)
        return (getter, setter)
    # Misc
    def __repr__(self):
        return "<{0}: ID3v2.{1} tag{2} with {3} frames>".format(
            type(self).__name__,
            self.version,
            ("({0})".format(", ".join(self.flags))
             if len(self.flags) > 0 else ""),
            len(self._frames))

    # Reading tags
    @classmethod
    def read(cls, filename, offset=0):
        """Read an ID3v2 tag from a file."""
        i = 0  # running frame index, recorded as each frame's frameno
        with fileutil.opened(filename, "rb") as file:
            file.seek(offset)
            tag = cls()
            tag._read_header(file)
            for (frameid, bflags, data) in tag._read_frames(file):
                if len(data) == 0:
                    warn("{0}: Ignoring empty frame".format(frameid),
                         EmptyFrameWarning)
                else:
                    frame = tag._decode_frame(frameid, bflags, data, i)
                    if frame is not None:
                        l = tag._frames.setdefault(frame.frameid, [])
                        l.append(frame)
                if file.tell() > tag.offset + tag.size:
                    # Ran past the declared tag size; stop parsing.
                    break
                i += 1
            try:
                tag._filename = file.name
            except AttributeError:
                # In-memory streams have no name attribute.
                pass
            return tag

    @classmethod
    def decode(cls, data):
        # Parse a tag from an in-memory byte buffer.
        return cls.read(io.BytesIO(data))

    def _decode_frame(self, frameid, bflags, data, frameno=None):
        # Decode one raw frame; malformed payloads become ErrorFrame
        # instances instead of aborting the whole tag.
        try:
            (flags, data) = self._interpret_frame_flags(frameid, bflags, data)
            if flags is None:
                flags = set()
            if frameid in self.known_frames:
                return self.known_frames[frameid]._decode(frameid, data,
                                                          flags,
                                                          frameno=frameno)
            else:
                # Unknown frame
                flags.add("unknown")
                warn("{0}: Unknown frame".format(frameid), UnknownFrameWarning)
                if frameid.startswith('T'): # Unknown text frame
                    return Frames.TextFrame._decode(frameid, data, flags,
                                                    frameno=frameno)
                elif frameid.startswith('W'): # Unknown URL frame
                    return Frames.URLFrame._decode(frameid, data, flags,
                                                   frameno=frameno)
                else:
                    return Frames.UnknownFrame._decode(frameid, data, flags,
                                                       frameno=frameno)
        except (FrameError, ValueError, EOFError) as e:
            warn("{0}: Invalid frame".format(frameid), ErrorFrameWarning)
            return Frames.ErrorFrame(frameid, data, exception=e, frameno=frameno)

    # Version-specific parsing hooks, implemented by Tag22/Tag23/Tag24.
    @abstractmethod
    def _read_header(self, file): pass

    @abstractmethod
    def _read_frames(self, file): pass

    @abstractmethod
    def _interpret_frame_flags(self, frameid, bflags, data): pass
    # Writing tags
    def write(self, filename=None):
        # Serialize this tag into filename (defaults to the file the tag was
        # read from), replacing any existing tag in place.
        if not filename:
            filename = self._filename
        if not filename:
            raise TypeError("invalid file: {0}".format(filename))
        with fileutil.opened(filename, "rb+") as file:
            try:
                (offset, length) = detect_tag(file)[1:3]
            except NoTagError:
                (offset, length) = (0, 0)
            if offset > 0:
                # Existing tag is not at the start of the file: remove it
                # and prepend the new tag at offset 0 instead.
                delete_tag(file)
                (offset, length) = (0, 0)
            tag_data = self.encode(size_hint=length)
            fileutil.replace_chunk(file, offset, length, tag_data)

    @abstractmethod
    def encode(self, size_hint=None):
        pass

    # Default amount of padding appended after the frames, and the largest
    # amount of padding accepted to honour a size_hint.
    padding_default = 128
    padding_max = 1024

    def _get_size_with_padding(self, size_desired, size_actual):
        # Choose the final tag size: keep size_desired when it only requires
        # an acceptable amount of padding; otherwise add default padding.
        size = size_actual
        if (size_desired is not None and size < size_desired
            and (self.padding_max is None or
                 size_desired - size_actual <= self.padding_max)):
            size = size_desired
        elif self.padding_default:
            size += min(self.padding_default, self.padding_max)
        return size
@staticmethod
def _is_frame_id(data):
if isinstance(data, str):
try:
data = data.encode("ASCII")
except UnicodeEncodeError:
return false
# Allow a single space at end of four-character ids
# Some programs (e.g. iTunes 8.2) generate such frames when converting
# from 2.2 to 2.3/2.4 tags.
pattern = re.compile(b"^[A-Z][A-Z0-9]{2}[A-Z0-9 ]?$")
return pattern.match(data)
    def _prepare_frames_hook(self):
        # Overridden by subclasses to massage frames before encoding.
        pass

    def _prepare_frames(self):
        # Produce the final, sorted list of frames to serialize.
        # Generate dictionary of frames
        d = self._frames
        # Merge duplicate frames
        for frameid in self._frames.keys():
            fs = self._frames[frameid]
            if len(fs) > 1:
                d[frameid] = fs[0]._merge(fs)
        self._prepare_frames_hook()
        # Convert frames
        newframes = []
        for frameid in self._frames.keys():
            for frame in self._frames[frameid]:
                try:
                    newframes.append(frame._to_version(self.version))
                except IncompatibleFrameError:
                    # Frame has no representation in this tag version.
                    warn("{0}: Ignoring incompatible frame".format(frameid),
                         FrameWarning)
                except ValueError as e:
                    warn("{0}: Ignoring invalid frame ({1})".format(frameid, e),
                         FrameWarning)
        # Sort frames
        newframes.sort(key=self.frame_order.key)
        return newframes
class Tag22(Tag):
    """Concrete Tag implementation for ID3v2.2 (three-letter frame ids)."""
    version = 2
    encodings = ("latin-1", "utf-16")  # text encodings tried when writing

    def __init__(self):
        super().__init__()

    title = property(*Tag._friendly_text_frame("TT2"))
    artist = property(*Tag._friendly_text_frame("TP1"))

    @property
    def date(self):
        # Compose a friendly date string from the TYE/TDA/TIM frames.
        fields = self._get_date("TYE", "TDA", "TIM")
        return self._friendly_date_string(*fields)

    @date.setter
    def date(self, value):
        # Split a "YYYY-MM-DD HH:MM:SS" string back into the three frames.
        self._validate_friendly_date(value)
        (year, month, day, hour, minute, second) = self._get_friendly_date(value)
        for f in "TYE", "TDA", "TIM":
            if f in self:
                del self[f]
        if year is not None:
            self["TYE"] = "{0:04}".format(year)
        if month is not None and day is not None:
            self["TDA"] = "{0:02}{1:02}".format(month, day)
        if hour is not None and minute is not None:
            self["TIM"] = "{0:02}{1:02}".format(hour, minute)

    album_artist = property(*Tag._friendly_text_frame("TP2"))
    album = property(*Tag._friendly_text_frame("TAL"))
    track = property(*Tag._friendly_track("TRK", "track_total"))
    track_total = property(*Tag._friendly_track_total("TRK", "track"))
    disc = property(*Tag._friendly_track("TPA", "disc_total"))
    disc_total = property(*Tag._friendly_track_total("TPA", "disc"))
    composer = property(*Tag._friendly_text_frame("TCM"))
    genre = property(*Tag._friendly_text_frame("TCO"))
    comment = property(*Tag._friendly_comment("COM"))
    grouping = property(*Tag._friendly_text_frame("TT1"))
    # TODO: compilation
    picture = property(*Tag._friendly_picture("PIC"))
    sort_title = property(*Tag._friendly_text_frame("TST"))
    sort_artist = property(*Tag._friendly_text_frame("TSP"))
    sort_album_artist = property(*Tag._friendly_text_frame("TS2"))
    sort_album = property(*Tag._friendly_text_frame("TSA"))
    sort_composer = property(*Tag._friendly_text_frame("TSC"))
    def _read_header(self, file):
        # Parse the 10-byte ID3v2.2 tag header at the current position.
        self.offset = file.tell()
        header = fileutil.xread(file, 10)
        if header[0:5] != b"ID3\x02\00":
            raise NoTagError("ID3v2.2 header not found")
        if header[5] & 0x80:
            self.flags.add("unsynchronised")
        if header[5] & 0x40: # Compression bit is ill-defined in standard
            raise TagError("ID3v2.2 tag compression is not supported")
        if header[5] & 0x3F:
            warn("Unknown ID3v2.2 flags", TagWarning)
        # The size field excludes the 10-byte header.
        self.size = Syncsafe.decode(header[6:10]) + 10

    def _read_frames(self, file):
        # Yield (frameid, flags, data) triples until padding or end of tag.
        if "unsynchronised" in self.flags:
            ufile = UnsyncReader(file)
        else:
            ufile = file
        while file.tell() < self.offset + self.size:
            header = fileutil.xread(ufile, 6)
            if not self._is_frame_id(header[0:3]):
                # Not a valid frame id: padding reached.
                break
            frameid = header[0:3].decode("ASCII")
            size = Int8.decode(header[3:6])
            data = fileutil.xread(ufile, size)
            yield (frameid, None, data)

    def _interpret_frame_flags(self, frameid, bflags, data):
        # No frame flags in v2.2
        return (None, data)
def __encode_one_frame(self, frame):
framedata = frame._encode(encodings=self.encodings)
data = bytearray()
# Frame id
if len(frame.frameid) != 3 or not self._is_frame_id(frame.frameid):
raise "Invalid ID3v2.2 frame id {0}".format(repr(frame.frameid))
data.extend(frame.frameid.encode("ASCII"))
# Size
data.extend(Int8.encode(len(framedata), width=3))
assert(len(data) == 6)
data.extend(framedata)
return data
    def _prepare_frames_hook(self):
        # v2.2 text frames hold a single string; collapse multi-valued text
        # frames before encoding.
        for frameid in self._frames.keys():
            for frame in self._frames[frameid]:
                if isinstance(frame, Frames.TextFrame):
                    # ID3v2.2 doesn't support multiple text values
                    if len(frame.text) > 1:
                        warn("{0}: merged multiple text strings into one value"
                             .format(frame.frameid), FrameWarning)
                        frame.text = [" / ".join(frame.text)]

    def encode(self, size_hint=None):
        # Serialize the whole tag; size_hint requests a total size, reached
        # with padding when acceptable.
        if len(self) == 0: # No frames -> no tag
            return b""
        frames = self._prepare_frames()
        framedata = bytearray().join(self.__encode_one_frame(frame)
                                     for frame in frames)
        if "unsynchronised" in self.flags:
            framedata = Unsync.encode(framedata)
        size = self._get_size_with_padding(size_hint, len(framedata) + 10)
        data = bytearray()
        data.extend(b"ID3\x02\x00")
        data.append(0x80 if "unsynchronised" in self.flags else 0x00)
        data.extend(Syncsafe.encode(size - 10, width=4))
        assert len(data) == 10
        data.extend(framedata)
        if size > len(data):
            # Zero padding up to the chosen size.
            data.extend(b"\x00" * (size - len(data)))
        assert len(data) == size
        return data
class Tag23(Tag):
    """Concrete Tag implementation for ID3v2.3 (four-letter frame ids)."""
    version = 3
    encodings = ("latin-1", "utf-16")  # text encodings tried when writing

    def __init__(self):
        super().__init__()

    title = property(*Tag._friendly_text_frame("TIT2"))
    artist = property(*Tag._friendly_text_frame("TPE1"))

    @property
    def date(self):
        # Compose a friendly date string from the TYER/TDAT/TIME frames.
        fields = self._get_date("TYER", "TDAT", "TIME")
        return self._friendly_date_string(*fields)

    @date.setter
    def date(self, value):
        # Split a "YYYY-MM-DD HH:MM:SS" string back into the three frames.
        self._validate_friendly_date(value)
        (year, month, day, hour, minute, second) = self._get_friendly_date(value)
        for f in "TYER", "TDAT", "TIME":
            if f in self:
                del self[f]
        if year is not None:
            self["TYER"] = "{0:04}".format(year)
        if month is not None and day is not None:
            self["TDAT"] = "{0:02}{1:02}".format(month, day)
        if hour is not None and minute is not None:
            self["TIME"] = "{0:02}{1:02}".format(hour, minute)

    album_artist = property(*Tag._friendly_text_frame("TPE2"))
    album = property(*Tag._friendly_text_frame("TALB"))
    track = property(*Tag._friendly_track("TRCK", "track_total"))
    track_total = property(*Tag._friendly_track_total("TRCK", "track"))
    disc = property(*Tag._friendly_track("TPOS", "disc_total"))
    disc_total = property(*Tag._friendly_track_total("TPOS", "disc"))
    composer = property(*Tag._friendly_text_frame("TCOM"))
    genre = property(*Tag._friendly_text_frame("TCON"))
    comment = property(*Tag._friendly_comment("COMM"))
    grouping = property(*Tag._friendly_text_frame("TIT1"))
    # TODO: compilation
    picture = property(*Tag._friendly_picture("APIC"))
    sort_title = property(*Tag._friendly_text_frame("TSOT"))
    sort_artist = property(*Tag._friendly_text_frame("TSOP"))
    sort_album_artist = property(*Tag._friendly_text_frame("TSO2"))
    sort_album = property(*Tag._friendly_text_frame("TSOA"))
    sort_composer = property(*Tag._friendly_text_frame("TSOC"))
    def _read_header(self, file):
        # Parse the 10-byte ID3v2.3 header (plus extended header if flagged).
        self.offset = file.tell()
        header = fileutil.xread(file, 10)
        if header[0:5] != b"ID3\x03\x00":
            raise NoTagError("ID3v2.3 header not found")
        if header[5] & 0x80:
            self.flags.add("unsynchronised")
        if header[5] & 0x40:
            self.flags.add("extended_header")
        if header[5] & 0x20:
            self.flags.add("experimental")
        if header[5] & 0x1F:
            warn("Unknown ID3v2.3 flags", TagWarning)
        # The size field excludes the 10-byte header.
        self.size = Syncsafe.decode(header[6:10]) + 10
        if "extended_header" in self.flags:
            self.__read_extended_header(file)

    def __read_extended_header(self, file):
        # Extended header layout: 4-byte size (6 or 10), 2-byte flags,
        # 4-byte padding size, optionally a 4-byte CRC.
        (size, ext_flags, self.padding_size) = \
            struct.unpack("!IHI", fileutil.xread(file, 10))
        if size != 6 and size != 10:
            warn("Unexpected size of ID3v2.3 extended header: {0}".format(size),
                 TagWarning)
        if ext_flags & 32768:
            if size < 10:
                warn("Extended header is too short for a CRC field: {0} bytes instead of 10".format(size),
                     TagWarning)
            else:
                self.flags.add("ext:crc_present")
                (self.crc32,) = struct.unpack("!I", fileutil.xread(file, 4))
                # NOTE(review): size is reduced so the skip below does not
                # re-read the CRC bytes — confirm against malformed sizes.
                size -= 6
        if size > 6:
            # Skip any extended-header bytes we do not understand.
            fileutil.xread(file, size - 6)

    def _read_frames(self, file):
        # Yield (frameid, flagbits, data) triples until padding/end of tag.
        if "unsynchronised" in self.flags:
            ufile = UnsyncReader(file)
        else:
            ufile = file
        while file.tell() < self.offset + self.size:
            header = fileutil.xread(ufile, 10)
            if not self._is_frame_id(header[0:4]):
                # Not a valid frame id: padding reached.
                break
            frameid = header[0:4].decode("ASCII")
            size = Int8.decode(header[4:8])
            bflags = Int8.decode(header[8:10])
            data = fileutil.xread(ufile, size)
            yield (frameid, bflags, data)
def _interpret_frame_flags(self, frameid, bflags, data):
flags = set()
# Frame encoding flags
if bflags & _FRAME23_FORMAT_UNKNOWN_MASK:
raise FrameError("{0}: Invalid ID3v2.3 frame encoding flags: 0x{0:X}".format(frameid, bflags))
if bflags & _FRAME23_FORMAT_COMPRESSED:
flags.add("compressed")
expanded_size = Int8.decode(data[0:4])
data = zlib.decompress(data[4:])
if bflags & _FRAME23_FORMAT_ENCRYPTED:
raise FrameError("{0}: Can't read ID3v2.3 encrypted frames".format(frameid))
if bflags & _FRAME23_FORMAT_GROUP:
flags.add("group")
flags.add("group={0}".format(data[0])) # Hack
data = data[1:]
# Frame status messages
if bflags & _FRAME23_STATUS_DISCARD_ON_TAG_ALTER:
flags.add("discard_on_tag_alter")
if bflags & _FRAME23_STATUS_DISCARD_ON_FILE_ALTER:
flags.add("discard_on_file_alter")
if bflags & _FRAME23_STATUS_READ_ONLY:
flags.add("read_only")
if bflags & _FRAME23_STATUS_UNKNOWN_MASK:
warn("{0}: Unexpected ID3v2.3 frame status flags: 0x{1:X}".format(frameid, bflags),
TagWarning)
return flags, data
def __encode_one_frame(self, frame):
    """Serialize FRAME into its binary ID3v2.3 representation.

    Returns a bytearray holding the 10-byte frame header, any format
    info bytes (decompressed size, grouping byte), and the frame data.
    Raises ValueError when the frame id is not a valid 4-character id.
    """
    framedata = frame._encode(encodings=self.encodings)
    origlen = len(framedata)
    flagval = 0
    frameinfo = bytearray()
    if "compressed" in frame.flags:
        framedata = zlib.compress(framedata)
        flagval |= _FRAME23_FORMAT_COMPRESSED
        # Compressed frames carry the decompressed size before the data.
        frameinfo.extend(Int8.encode(origlen, width=4))
    if "group" in frame.flags:
        # The group id byte is smuggled through in a "group=N" pseudo-flag.
        grp = 0
        for flag in frame.flags:
            if flag.startswith("group="):
                grp = int(flag[6:])
        frameinfo.append(grp)
        flagval |= _FRAME23_FORMAT_GROUP
    if "discard_on_tag_alter" in frame.flags:
        flagval |= _FRAME23_STATUS_DISCARD_ON_TAG_ALTER
    if "discard_on_file_alter" in frame.flags:
        flagval |= _FRAME23_STATUS_DISCARD_ON_FILE_ALTER
    if "read_only" in frame.flags:
        flagval |= _FRAME23_STATUS_READ_ONLY
    data = bytearray()
    # Frame id
    if len(frame.frameid) != 4 or not self._is_frame_id(frame.frameid.encode("ASCII")):
        raise ValueError("Invalid ID3v2.3 frame id {0}".format(repr(frame.frameid)))
    data.extend(frame.frameid.encode("ASCII"))
    # Size (excludes the 10-byte header, includes the format info bytes)
    data.extend(Int8.encode(len(frameinfo) + len(framedata), width=4))
    # Flags
    data.extend(Int8.encode(flagval, width=2))
    assert len(data) == 10
    # Format info
    data.extend(frameinfo)
    # Frame data
    data.extend(framedata)
    return data
def encode(self, size_hint=None):
    """Return the complete binary ID3v2.3 tag.

    size_hint, when given, is the size of the tag being replaced; the
    encoder pads up to it (bounded by padding_max) so the rest of the
    file need not be rewritten.  An empty tag encodes to b"".
    """
    if len(self) == 0: # No frames -> no tag
        return b""
    frames = self._prepare_frames()
    framedata = bytearray().join(self.__encode_one_frame(frame)
                                 for frame in frames)
    # In v2.3 unsynchronisation is applied to the frame block as a whole.
    if "unsynchronised" in self.flags:
        framedata = Unsync.encode(framedata)
    size = self._get_size_with_padding(size_hint, len(framedata) + 10)
    data = bytearray()
    data.extend(b"ID3\x03\x00")
    flagval = 0x00
    if "unsynchronised" in self.flags:
        flagval |= 0x80
    data.append(flagval)
    # Header size field is syncsafe and excludes the 10-byte header itself.
    data.extend(Syncsafe.encode(size - 10, width=4))
    assert len(data) == 10
    data.extend(framedata)
    if size > len(data):
        data.extend(b"\x00" * (size - len(data)))
    assert len(data) == size
    return data
class Tag24(Tag):
    """An ID3v2.4 tag (id3v2.4.0-structure)."""

    # Old iTunes versions (fixed in 8.2) wrote frame sizes as plain 8-bit
    # integers instead of syncsafe values; set this to True to force
    # 8-bit parsing.  See _read_frames for the full story.
    ITUNES_WORKAROUND = False

    version = 4
    encodings = ("latin-1", "utf-8")

    def __init__(self):
        super().__init__()

    # Friendly accessors mapped onto the v2.4 frame ids.
    title = property(*Tag._friendly_text_frame("TIT2"))
    artist = property(*Tag._friendly_text_frame("TPE1"))

    @property
    def date(self):
        """Recording time (TDRC) as a 'YYYY-MM-DD HH:MM:SS' prefix, or ""."""
        try:
            frame = self["TDRC"]
        except KeyError:
            return ""
        else:
            return frame.text[0]
    @date.setter
    def date(self, value):
        self._validate_friendly_date(value)
        fields = self._get_friendly_date(value)
        val = self._friendly_date_string(*fields)
        if val:
            self["TDRC"] = val
        elif "TDRC" in self:
            del self["TDRC"]

    album = property(*Tag._friendly_text_frame("TALB"))
    album_artist = property(*Tag._friendly_text_frame("TPE2"))
    track = property(*Tag._friendly_track("TRCK", "track_total"))
    track_total = property(*Tag._friendly_track_total("TRCK", "track"))
    disc = property(*Tag._friendly_track("TPOS", "disc_total"))
    disc_total = property(*Tag._friendly_track_total("TPOS", "disc"))
    composer = property(*Tag._friendly_text_frame("TCOM"))
    genre = property(*Tag._friendly_text_frame("TCON"))
    comment = property(*Tag._friendly_comment("COMM"))
    grouping = property(*Tag._friendly_text_frame("TIT1"))
    # TODO: compilation
    picture = property(*Tag._friendly_picture("APIC"))
    sort_title = property(*Tag._friendly_text_frame("TSOT"))
    sort_artist = property(*Tag._friendly_text_frame("TSOP"))
    sort_album_artist = property(*Tag._friendly_text_frame("TSO2"))
    sort_album = property(*Tag._friendly_text_frame("TSOA"))
    sort_composer = property(*Tag._friendly_text_frame("TSOC"))

    def _read_header(self, file):
        """Parse the 10-byte tag header (and extended header, if any),
        setting self.offset, self.size and self.flags."""
        self.offset = file.tell()
        header = fileutil.xread(file, 10)
        if header[0:5] != b"ID3\x04\x00":
            raise NoTagError("ID3v2 header not found")
        if header[5] & _TAG24_UNSYNCHRONISED:
            self.flags.add("unsynchronised")
        if header[5] & _TAG24_EXTENDED_HEADER:
            self.flags.add("extended_header")
        if header[5] & _TAG24_EXPERIMENTAL:
            self.flags.add("experimental")
        if header[5] & _TAG24_FOOTER:
            self.flags.add("footer")
        if header[5] & _TAG24_UNKNOWN_MASK:
            warn("Unknown ID3v2.4 flags", TagWarning)
        # The declared size excludes the header and the optional footer.
        self.size = (Syncsafe.decode(header[6:10]) + 10
                     + (10 if "footer" in self.flags else 0))
        if "extended_header" in self.flags:
            self.__read_extended_header(file)

    def __read_extended_header_flag_data(self, data):
        """Split one extended-header field (1-byte length + payload) off
        the front of DATA; return (payload, rest)."""
        length = data[0]
        if length & 128:
            raise TagError("Invalid size of extended header field")
        return (data[1:1+length], data[1+length:])

    def __read_extended_header(self, file):
        """Parse the v2.4 extended header, recording its flags, CRC and
        restriction data on self."""
        size = Syncsafe.decode(fileutil.xread(file, 4))
        if size < 6:
            warn("Unexpected size of ID3v2.4 extended header: {0}".format(size),
                 TagWarning)
        data = fileutil.xread(file, size - 4)
        numflags = data[0]
        if numflags != 1:
            warn("Unexpected number of ID3v2.4 extended flag bytes: {0}"
                 .format(numflags),
                 TagWarning)
        flags = data[1]
        data = data[1+numflags:]
        if flags & 0x40:
            self.flags.add("ext:update")
            (dummy, data) = self.__read_extended_header_flag_data(data)
        if flags & 0x20:
            self.flags.add("ext:crc_present")
            (self.crc32, data) = self.__read_extended_header_flag_data(data)
            self.crc32 = Syncsafe.decode(self.crc32)
        if flags & 0x10:
            self.flags.add("ext:restrictions")
            (self.restrictions, data) = self.__read_extended_header_flag_data(data)

    def _read_frames(self, file, syncsafe_workaround=None):
        """Collect (frameid, bflags, data) triples for all frames.

        Older versions of iTunes (fixed in 8.2) stored frame sizes as
        straight 8-bit integers, not the syncsafe values the spec
        requires.  When an invalid syncsafe size is detected we seek back
        and re-read the entire tag in 8-bit mode.  The heuristic misses
        tags whose 8-bit sizes happen to be valid syncsafe values; set
        ITUNES_WORKAROUND = True to force 8-bit mode for such files and
        let stagger re-encode them.  (Stagger itself never writes
        non-syncsafe frame lengths.)
        """
        if syncsafe_workaround is None:
            syncsafe_workaround = self.ITUNES_WORKAROUND
        origfpos = file.tell()
        frames = []
        while file.tell() < self.offset + self.size:
            header = fileutil.xread(file, 10)
            if not self._is_frame_id(header[0:4]):
                break  # reached padding
            frameid = header[0:4].decode("ASCII")
            if syncsafe_workaround:
                size = Int8.decode(header[4:8])
            else:
                try:
                    size = Syncsafe.decode(header[4:8])
                except ValueError:
                    # (Removed a dead "if syncsafe_workaround: raise"
                    # here -- this branch only runs when the flag is off.)
                    warn("Invalid syncsafe frame size; switching to 8-bit mode",
                         FrameWarning)
                    file.seek(origfpos)
                    return self._read_frames(file, True)
            bflags = Int8.decode(header[8:10])
            data = fileutil.xread(file, size)
            frames.append((frameid, bflags, data))
        return frames

    def _interpret_frame_flags(self, frameid, bflags, data):
        """Decode the v2.4 per-frame flag bytes into symbolic flags and
        undo frame-level unsynchronisation/compression on DATA.

        Returns (flags, data); raises FrameError for encrypted frames and
        unknown format bits.
        """
        flags = set()
        # Frame format flags
        if bflags & _FRAME24_FORMAT_UNKNOWN_MASK:
            raise FrameError("{0}: Unknown frame encoding flags: 0x{1:X}".format(frameid, bflags))
        if bflags & _FRAME24_FORMAT_GROUP:
            flags.add("group")
            flags.add("group={0}".format(data[0])) # hack
            data = data[1:]
        if bflags & _FRAME24_FORMAT_COMPRESSED:
            flags.add("compressed")
        if bflags & _FRAME24_FORMAT_ENCRYPTED:
            raise FrameError("{0}: Can't read encrypted frames".format(frameid))
        if bflags & _FRAME24_FORMAT_UNSYNCHRONISED:
            flags.add("unsynchronised")
        expanded_size = len(data)  # size after undoing transformations (unused)
        if bflags & _FRAME24_FORMAT_DATA_LENGTH_INDICATOR:
            flags.add("data_length_indicator")
            expanded_size = Syncsafe.decode(data[0:4])
            data = data[4:]
        # BUG FIX: these checks used to consult self.flags (tag-level).
        # "compressed" only ever appears as a frame-level flag, so
        # compressed frames were never decompressed, and a frame carrying
        # its own unsynchronisation bit was never decoded.  Test the
        # frame-level flags; keep the tag-level unsync check as well for
        # writers that set only the tag flag.
        if "unsynchronised" in flags or "unsynchronised" in self.flags:
            data = Unsync.decode(data)
        if "compressed" in flags:
            data = zlib.decompress(data)
        # Frame status flags
        if bflags & _FRAME24_STATUS_DISCARD_ON_TAG_ALTER:
            flags.add("discard_on_tag_alter")
        if bflags & _FRAME24_STATUS_DISCARD_ON_FILE_ALTER:
            flags.add("discard_on_file_alter")
        if bflags & _FRAME24_STATUS_READ_ONLY:
            flags.add("read_only")
        if bflags & _FRAME24_STATUS_UNKNOWN_MASK:
            warn("{0}: Unexpected status flags: 0x{1:X}"
                 .format(frameid, bflags), FrameWarning)
        return flags, data

    def __encode_one_frame(self, frame):
        """Serialize FRAME into its binary v2.4 representation (header,
        format info bytes, then data).  Raises ValueError for bad ids."""
        framedata = frame._encode(encodings=self.encodings)
        origlen = len(framedata)
        flagval = 0
        frameinfo = bytearray()
        if "group" in frame.flags:
            # Group id byte travels in a "group=N" pseudo-flag.
            grp = 0
            for flag in frame.flags:
                if flag.startswith("group="):
                    grp = int(flag[6:])
            frameinfo.append(grp)
            flagval |= _FRAME24_FORMAT_GROUP
        if "compressed" in frame.flags:
            frame.flags.add("data_length_indicator")
            framedata = zlib.compress(framedata)
            flagval |= _FRAME24_FORMAT_COMPRESSED
        if "unsynchronised" in frame.flags:
            frame.flags.add("data_length_indicator")
            framedata = Unsync.encode(framedata)
            flagval |= _FRAME24_FORMAT_UNSYNCHRONISED
        if "data_length_indicator" in frame.flags:
            frameinfo.extend(Syncsafe.encode(origlen, width=4))
            flagval |= _FRAME24_FORMAT_DATA_LENGTH_INDICATOR
        if "discard_on_tag_alter" in frame.flags:
            flagval |= _FRAME24_STATUS_DISCARD_ON_TAG_ALTER
        if "discard_on_file_alter" in frame.flags:
            flagval |= _FRAME24_STATUS_DISCARD_ON_FILE_ALTER
        if "read_only" in frame.flags:
            flagval |= _FRAME24_STATUS_READ_ONLY
        data = bytearray()
        # Frame id
        if len(frame.frameid) != 4 or not self._is_frame_id(frame.frameid):
            # BUG FIX: this used to raise a plain string, which is a
            # TypeError in Python 3; raise ValueError like the v2.3 encoder.
            raise ValueError("{0}: Invalid frame id".format(repr(frame.frameid)))
        data.extend(frame.frameid.encode("ASCII"))
        # Size (syncsafe, excludes the 10-byte header)
        data.extend(Syncsafe.encode(len(frameinfo) + len(framedata), width=4))
        # Flags
        data.extend(Int8.encode(flagval, width=2))
        assert len(data) == 10
        # Format info
        data.extend(frameinfo)
        # Frame data
        data.extend(framedata)
        return data

    def encode(self, size_hint=None):
        """Return the complete binary v2.4 tag; pads toward size_hint when
        possible.  An empty tag encodes to b""."""
        if len(self) == 0: # No frames -> no tag
            return b""
        frames = self._prepare_frames()
        # In v2.4, unsynchronisation is applied per frame, not globally.
        if "unsynchronised" in self.flags:
            for frame in frames:
                frame.flags.add("unsynchronised")
        framedata = bytearray().join(self.__encode_one_frame(frame)
                                     for frame in frames)
        size = self._get_size_with_padding(size_hint, len(framedata) + 10)
        data = bytearray()
        data.extend(b"ID3\x04\x00")
        flagval = 0x00
        if "unsynchronised" in self.flags:
            flagval |= 0x80
        data.append(flagval)
        data.extend(Syncsafe.encode(size - 10, width=4))
        assert len(data) == 10
        data.extend(framedata)
        if size > len(data):
            data.extend(b"\x00" * (size - len(data)))
        assert len(data) == size
        return data
# Dispatch table consulted by detect_tag: maps the ID3v2 minor version
# byte to the Tag subclass that parses that revision.
_tag_versions = {
    2: Tag22,
    3: Tag23,
    4: Tag24,
    }
|
staggerpkg/stagger | stagger/tags.py | frameclass | python | def frameclass(cls):
assert issubclass(cls, Frames.Frame)
# Register v2.2 versions of v2.3/v2.4 frames if encoded by inheritance.
if len(cls.__name__) == 3:
base = cls.__bases__[0]
if issubclass(base, Frames.Frame) and base._in_version(3, 4):
assert not hasattr(base, "_v2_frame")
base._v2_frame = cls
# Override frameid from base with v2.2 name
if base.frameid == cls.frameid:
cls.frameid = cls.__name__
# Add frameid.
if not hasattr(cls, "frameid"):
cls.frameid = cls.__name__
assert Tag._is_frame_id(cls.frameid)
# Supply _version attribute if missing.
if len(cls.frameid) == 3:
cls._version = 2
if len(cls.frameid) == 4 and not cls._version:
cls._version = (3, 4)
# Register cls as a known frame.
assert cls.frameid not in Tag.known_frames
Tag.known_frames[cls.frameid] = cls
return cls | Register cls as a class representing an ID3 frame.
Sets cls.frameid and cls._version if not present, and registers the
new frame in Tag's known_frames dictionary.
To be used as a decorator on the class definition:
@frameclass
class UFID(Frame):
_framespec = (NullTerminatedStringSpec("owner"), BinaryDataSpec("data")) | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/tags.py#L119-L158 | null | #
# tags.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import abc
import struct
import re
import collections
import io
import imghdr
import zlib
from abc import abstractmethod, abstractproperty
from warnings import warn
from contextlib import contextmanager
from stagger.errors import *
from stagger.conversion import *
import stagger.frames as Frames
import stagger.fileutil as fileutil
_FRAME23_FORMAT_COMPRESSED = 0x0080
_FRAME23_FORMAT_ENCRYPTED = 0x0040
_FRAME23_FORMAT_GROUP = 0x0020
_FRAME23_FORMAT_UNKNOWN_MASK = 0x001F
_FRAME23_STATUS_DISCARD_ON_TAG_ALTER = 0x8000
_FRAME23_STATUS_DISCARD_ON_FILE_ALTER = 0x4000
_FRAME23_STATUS_READ_ONLY = 0x2000
_FRAME23_STATUS_UNKNOWN_MASK = 0x1F00
_TAG24_UNSYNCHRONISED = 0x80
_TAG24_EXTENDED_HEADER = 0x40
_TAG24_EXPERIMENTAL = 0x20
_TAG24_FOOTER = 0x10
_TAG24_UNKNOWN_MASK = 0x0F
_FRAME24_FORMAT_GROUP = 0x0040
_FRAME24_FORMAT_COMPRESSED = 0x0008
_FRAME24_FORMAT_ENCRYPTED = 0x0004
_FRAME24_FORMAT_UNSYNCHRONISED = 0x0002
_FRAME24_FORMAT_DATA_LENGTH_INDICATOR = 0x0001
_FRAME24_FORMAT_UNKNOWN_MASK = 0x00B0
_FRAME24_STATUS_DISCARD_ON_TAG_ALTER = 0x4000
_FRAME24_STATUS_DISCARD_ON_FILE_ALTER = 0x2000
_FRAME24_STATUS_READ_ONLY = 0x1000
_FRAME24_STATUS_UNKNOWN_MASK = 0x8F00
def read_tag(filename):
    """Locate and parse the ID3v2 tag in FILENAME, returning a Tag object."""
    with fileutil.opened(filename, "rb") as src:
        (tagclass, start, _length) = detect_tag(src)
        return tagclass.read(src, start)
def decode_tag(data):
    """Parse an ID3v2 tag out of the in-memory byte string DATA."""
    buffer = io.BytesIO(data)
    return read_tag(buffer)
def delete_tag(filename):
    """Strip the ID3v2 tag from FILENAME; a file without one is left alone."""
    with fileutil.opened(filename, "rb+") as target:
        try:
            (_cls, start, span) = detect_tag(target)
            fileutil.replace_chunk(target, start, span, bytes())
        except NoTagError:
            pass  # no tag present -- nothing to do
def detect_tag(filename):
    """Return type and position of ID3v2 tag in filename.

    Returns (tag_class, offset, length), where tag_class
    is either Tag22, Tag23, or Tag24, and (offset, length)
    is the position of the tag in the file.
    """
    with fileutil.opened(filename, "rb") as src:
        src.seek(0)
        head = src.read(10)
        src.seek(0)  # leave the file positioned at the start
        if len(head) < 10:
            raise NoTagError("File too short")
        if not head.startswith(b"ID3"):
            raise NoTagError("ID3v2 tag not found")
        major = head[3]
        revision = head[4]
        if major not in _tag_versions or revision != 0:
            raise TagError("Unknown ID3 version: 2.{0}.{1}"
                           .format(major, revision))
        total = Syncsafe.decode(head[6:10]) + 10
        # An ID3v2.4 footer adds another 10 bytes after the frames.
        if major == 4 and head[5] & _TAG24_FOOTER:
            total += 10
        return (_tag_versions[major], 0, total)
def frameclass(cls):
    """Register cls as a class representing an ID3 frame.

    Sets cls.frameid and cls._version if not present, and registers the
    new frame in Tag's known_frames dictionary.

    To be used as a decorator on the class definition:

    @frameclass
    class UFID(Frame):
        _framespec = (NullTerminatedStringSpec("owner"), BinaryDataSpec("data"))
    """
    assert issubclass(cls, Frames.Frame)

    # A three-letter class name marks an ID3v2.2 frame; when it directly
    # subclasses a v2.3/v2.4 frame, link the two and take over the id.
    if len(cls.__name__) == 3:
        parent = cls.__bases__[0]
        if issubclass(parent, Frames.Frame) and parent._in_version(3, 4):
            assert not hasattr(parent, "_v2_frame")
            parent._v2_frame = cls
            # Override frameid inherited from parent with the v2.2 name.
            if parent.frameid == cls.frameid:
                cls.frameid = cls.__name__

    # Default the frame id to the class name.
    if not hasattr(cls, "frameid"):
        cls.frameid = cls.__name__
    assert Tag._is_frame_id(cls.frameid)

    # Derive the applicable tag version(s) from the id length if unset.
    if len(cls.frameid) == 3:
        cls._version = 2
    if len(cls.frameid) == 4 and not cls._version:
        cls._version = (3, 4)

    # Register cls as a known frame.
    assert cls.frameid not in Tag.known_frames
    Tag.known_frames[cls.frameid] = cls
    return cls
class FrameOrder:
    """Order frames based on their position in a predefined list of patterns,
    and their original position in the source tag.

    A pattern may be a frame class, or a regular expression that is to be
    matched against the frame id.

    >>> order = FrameOrder(TIT1, "T.*", TXXX)
    >>> order.key(TIT1())
    (0, 1)
    >>> order.key(TPE1())
    (1, 1)
    >>> order.key(TXXX())
    (2, 1)
    >>> order.key(APIC())
    (3, 1)
    >>> order.key(APIC(frameno=3))
    (3, 0, 3)
    """
    def __init__(self, *patterns):
        self.re_keys = []
        self.frame_keys = dict()
        index = -1
        for index, pattern in enumerate(patterns):
            if isinstance(pattern, str):
                self.re_keys.append((pattern, index))
            else:
                assert issubclass(pattern, Frames.Frame)
                self.frame_keys[pattern] = index
        # Frames matching no pattern sort after everything else.
        self.unknown_key = index + 1

    def key(self, frame):
        "Return the sort key for the given frame."
        def keytuple(primary):
            # Frames with a known original position keep it as a tiebreak
            # and sort before positionless frames of the same rank.
            if frame.frameno is None:
                return (primary, 1)
            return (primary, 0, frame.frameno)

        frametype = type(frame)
        # Exact class match
        if frametype in self.frame_keys:
            return keytuple(self.frame_keys[frametype])
        # v2.2 frames sort with their v2.3/v2.4 parent class
        if frame._in_version(2) and frametype.__bases__[0] in self.frame_keys:
            return keytuple(self.frame_keys[frametype.__bases__[0]])
        # First regex pattern that matches the frame id
        for (pattern, rank) in self.re_keys:
            if re.match(pattern, frame.frameid):
                return keytuple(rank)
        return keytuple(self.unknown_key)

    def __repr__(self):
        entries = [(repr(pattern), rank) for (pattern, rank) in self.re_keys]
        entries.extend((klass.__name__, self.frame_keys[klass])
                       for klass in self.frame_keys)
        entries.sort(key=lambda pair: pair[1])
        return "<FrameOrder: {0}>".format(", ".join(e[0] for e in entries))
class Tag(collections.MutableMapping, metaclass=abc.ABCMeta):
known_frames = { } # Maps known frameids to Frame class objects
frame_order = None # Initialized by stagger.id3
def __init__(self):
self.flags = set()
self._frames = dict()
self._filename = None
# Primary accessor (no magic)
def frames(self, key=None, orig_order=False):
"""Returns a list of frames in this tag.
If KEY is None, returns all frames in the tag; otherwise returns all frames
whose frameid matches KEY.
If ORIG_ORDER is True, then the frames are returned in their original order.
Otherwise the frames are sorted in canonical order according to the frame_order
field of this tag.
"""
if key is not None:
# If there are multiple frames, then they are already in original order.
key = self._normalize_key(key)
if len(self._frames[key]) == 0:
raise KeyError("Key not found: " + repr(key))
return self._frames[key]
frames = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
frames.append(frame)
if orig_order:
key = (lambda frame:
(0, frame.frameno)
if frame.frameno is not None
else (1,))
else:
key = self.frame_order.key
frames.sort(key=key)
return frames
# MutableMapping API
def __iter__(self):
for frameid in self._frames:
yield frameid
def __len__(self):
return sum(len(self._frames[l]) for l in self._frames)
def __eq__(self, other):
return (self.version == other.version
and self.flags == other.flags
and self._frames == other._frames)
def _normalize_key(self, key, unknown_ok=True):
"""Return the normalized version of KEY.
KEY may be a frameid (a string), or a Frame class object.
If KEY corresponds to a registered frameid, then that frameid is returned.
Otherwise, either KeyError is raised, or KEY is returned verbatim,
depending on the value of UNKNOWN_OK.
"""
if Frames.is_frame_class(key):
key = key.frameid
if isinstance(key, str):
if not self._is_frame_id(key):
raise KeyError("{0}: Invalid frame id".format(key))
if key not in self.known_frames:
if unknown_ok:
warn("{0}: Unknown frame id".format(key), UnknownFrameWarning)
else:
raise KeyError("{0}: Unknown frame id".format(key))
return key
# Mapping accessor (with extra magic, for convenience)
def __getitem__(self, key):
key = self._normalize_key(key)
fs = self.frames(key)
allow_duplicates = (key not in self.known_frames
or self.known_frames[key]._allow_duplicates)
if allow_duplicates:
return fs
if len(fs) > 1:
# Merge duplicates into one ephemeral frame, and return that.
# This may break users' expectations when they try to make changes
# to the attributes of the returned frame; however, I think
# sometimes returning a list, sometimes a single frame for the same
# frame id would be even worse.
fs = fs[0]._merge(fs)
assert len(fs) == 1
return fs[0]
def __setitem__(self, key, value):
key = self._normalize_key(key, unknown_ok=False)
if isinstance(value, self.known_frames[key]):
self._frames[key] = [value]
return
if self.known_frames[key]._allow_duplicates:
if not isinstance(value, collections.Iterable) or isinstance(value, str):
raise ValueError("{0} requires a list of frame values".format(key))
self._frames[key] = [val if isinstance(val, self.known_frames[key])
else self.known_frames[key](val)
for val in value]
else: # not _allow_duplicates
self._frames[key] = [self.known_frames[key](value)]
def __delitem__(self, key):
del self._frames[self._normalize_key(key)]
def values(self):
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
yield frame
# Friendly names API
_friendly_names = [ "title", "artist",
"date",
"album-artist", "album",
"track", "track-total",
"disc", "disc-total",
"grouping", "composer",
"genre",
"comment",
#"compilation",
"picture",
"sort-title", "sort-artist",
"sort-album-artist", "sort-album",
"sort-composer",
]
# Abstract friendly-name accessors; the concrete Tag22/Tag23/Tag24
# subclasses override these with real properties mapped onto
# version-specific frames.  The lambda placeholders always yield None.
title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
date = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
genre = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
# BUG FIX: the getter here read "lambda self: Non" -- a typo for None
# that raised NameError whenever the placeholder getter was invoked.
comment = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
grouping = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
picture = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
def __friendly_text_collect(self, frameid):
"""Collect text values from all instances of FRAMEID into a single list.
Returns an empty list if there are no instances of FRAMEID with a text attribute.
"""
try:
return self[frameid].text
except (KeyError, AttributeError):
return []
@classmethod
def _friendly_text_frame(cls, frameid):
def getter(self):
return " / ".join(self.__friendly_text_collect(frameid))
def setter(self, value):
if isinstance(value, str):
if len(value):
# For non-empty strings, split value
self[frameid] = value.split(" / ")
elif frameid in self:
# For empty strings, delete frame
del self[frameid]
else:
self[frameid] = value
return (getter, setter)
@classmethod
def _friendly_track(cls, frameid, totalattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[0])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
total = getattr(self, totalattr)
if total > 0:
self[frameid] = "{0}/{1}".format(value, total)
elif value:
self[frameid] = str(value)
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_track_total(cls, frameid, trackattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[2])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
track = getattr(self, trackattr)
if value:
self[frameid] = "{0}/{1}".format(track, value)
elif track:
self[frameid] = str(track)
elif frameid in self:
del self[frameid]
return (getter, setter)
__date_pattern = re.compile(r"""(?x)\s*
((?P<year>[0-9]{4}) # YYYY
(-(?P<month>[01][0-9]) # -MM
(-(?P<day>[0-3][0-9]) # -DD
)?)?)?
[ T]?
((?P<hour>[0-2][0-9]) # HH
(:(?P<min>[0-6][0-9]) # :MM
(:(?P<sec>[0-6][0-9]) # :SS
)?)?)?\s*
""")
@classmethod
def _validate_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None or m.end() != len(string):
raise ValueError("date must be in 'YYYY-MM-DD HH:MM:SS' format")
@classmethod
def _get_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None:
return (None, None, None, None, None, None)
res = []
for field in ("year", "month", "day", "hour", "min", "sec"):
v = m.group(field)
res.append(int(v) if v is not None else None)
return res
def _get_date(self, yearframe, dateframe, timeframe):
year = month = day = hour = minute = second = None
# Parse year.
try:
year = int(self.__friendly_text_collect(yearframe)[0])
except (IndexError, ValueError):
pass
# Parse month and date.
try:
date = self.__friendly_text_collect(dateframe)[0]
m = re.match(r"\s*(?P<month>[01][0-9])\s*-?\s*(?P<day>[0-3][0-9])?\s*$",
date)
if m is not None:
month = int(m.group("month"))
day = int(m.group("day"))
except IndexError:
pass
# Parse time.
try:
time = self.__friendly_text_collect(timeframe)[0]
m = re.match(r"\s*(?P<hour>[0-2][0-9])\s*:?\s*"
"(?P<minute>[0-5][0-9])\s*:?\s*"
"(?P<second>[0-5][0-9])?\s*$", time)
if m is not None:
hour = int(m.group("hour"))
minute = int(m.group("minute"))
s = m.group("second")
second = int(s) if s is not None else None
except IndexError:
pass
return (year, month, day, hour, minute, second)
def _friendly_date_string(self, *fields):
seps = ("", "-", "-", " ", ":", ":")
formats = ("04", "02", "02", "02", "02", "02")
res = []
for i in range(len(fields)):
if fields[i] is None:
break
res.append(seps[i])
res.append("{0:{1}}".format(fields[i], formats[i]))
return "".join(res)
@classmethod
def _friendly_picture(cls, frameid):
def getter(self):
if frameid not in self:
return ""
else:
return ", ".join("{0}:{1}:<{2} bytes of {3} data>"
.format(f._spec("type").to_str(f.type),
f.desc,
len(f.data),
imghdr.what(None, f.data[:32]))
for f in self[frameid])
def setter(self, value):
if len(value) > 0:
self[frameid] = [self.known_frames[frameid](value=value)]
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_comment(cls, frameid):
def comment_frame_index(self):
if frameid not in self:
return None
# Return comment with lang="eng", desc="", if present.
# Otherwise return the first comment with no description,
# regardless of language.
icmt = None
for i in range(len(self[frameid])):
f = self[frameid][i]
if f.desc == "":
if f.lang == "eng":
return i
if icmt is None:
icmt = i
return icmt
def getter(self):
i = comment_frame_index(self)
if i is None:
return ""
else:
return self[frameid][i].text
def setter(self, value):
assert isinstance(value, str)
i = comment_frame_index(self)
if i is not None:
del self._frames[frameid][i]
if len(value) > 0:
frame = self.known_frames[frameid](lang="eng", desc="", text=value)
if frameid not in self._frames:
self._frames[frameid] = []
self._frames[frameid].append(frame)
return (getter, setter)
# Misc
def __repr__(self):
return "<{0}: ID3v2.{1} tag{2} with {3} frames>".format(
type(self).__name__,
self.version,
("({0})".format(", ".join(self.flags))
if len(self.flags) > 0 else ""),
len(self._frames))
# Reading tags
@classmethod
def read(cls, filename, offset=0):
"""Read an ID3v2 tag from a file."""
i = 0
with fileutil.opened(filename, "rb") as file:
file.seek(offset)
tag = cls()
tag._read_header(file)
for (frameid, bflags, data) in tag._read_frames(file):
if len(data) == 0:
warn("{0}: Ignoring empty frame".format(frameid),
EmptyFrameWarning)
else:
frame = tag._decode_frame(frameid, bflags, data, i)
if frame is not None:
l = tag._frames.setdefault(frame.frameid, [])
l.append(frame)
if file.tell() > tag.offset + tag.size:
break
i += 1
try:
tag._filename = file.name
except AttributeError:
pass
return tag
@classmethod
def decode(cls, data):
return cls.read(io.BytesIO(data))
def _decode_frame(self, frameid, bflags, data, frameno=None):
try:
(flags, data) = self._interpret_frame_flags(frameid, bflags, data)
if flags is None:
flags = set()
if frameid in self.known_frames:
return self.known_frames[frameid]._decode(frameid, data,
flags,
frameno=frameno)
else:
# Unknown frame
flags.add("unknown")
warn("{0}: Unknown frame".format(frameid), UnknownFrameWarning)
if frameid.startswith('T'): # Unknown text frame
return Frames.TextFrame._decode(frameid, data, flags,
frameno=frameno)
elif frameid.startswith('W'): # Unknown URL frame
return Frames.URLFrame._decode(frameid, data, flags,
frameno=frameno)
else:
return Frames.UnknownFrame._decode(frameid, data, flags,
frameno=frameno)
except (FrameError, ValueError, EOFError) as e:
warn("{0}: Invalid frame".format(frameid), ErrorFrameWarning)
return Frames.ErrorFrame(frameid, data, exception=e, frameno=frameno)
@abstractmethod
def _read_header(self, file): pass
@abstractmethod
def _read_frames(self, file): pass
@abstractmethod
def _interpret_frame_flags(self, frameid, bflags, data): pass
# Writing tags
def write(self, filename=None):
if not filename:
filename = self._filename
if not filename:
raise TypeError("invalid file: {0}".format(filename))
with fileutil.opened(filename, "rb+") as file:
try:
(offset, length) = detect_tag(file)[1:3]
except NoTagError:
(offset, length) = (0, 0)
if offset > 0:
delete_tag(file)
(offset, length) = (0, 0)
tag_data = self.encode(size_hint=length)
fileutil.replace_chunk(file, offset, length, tag_data)
@abstractmethod
def encode(self, size_hint=None):
pass
padding_default = 128
padding_max = 1024
def _get_size_with_padding(self, size_desired, size_actual):
size = size_actual
if (size_desired is not None and size < size_desired
and (self.padding_max is None or
size_desired - size_actual <= self.padding_max)):
size = size_desired
elif self.padding_default:
size += min(self.padding_default, self.padding_max)
return size
@staticmethod
def _is_frame_id(data):
    """Return a truthy value iff DATA looks like a valid ID3v2 frame id.

    Accepts either str or bytes; a str id must be pure ASCII.
    """
    if isinstance(data, str):
        try:
            data = data.encode("ASCII")
        except UnicodeEncodeError:
            # BUG FIX: was "return false" (NameError at runtime) --
            # non-ASCII ids are simply invalid.
            return False
    # Allow a single space at end of four-character ids
    # Some programs (e.g. iTunes 8.2) generate such frames when converting
    # from 2.2 to 2.3/2.4 tags.
    pattern = re.compile(b"^[A-Z][A-Z0-9]{2}[A-Z0-9 ]?$")
    return pattern.match(data)
def _prepare_frames_hook(self):
pass
def _prepare_frames(self):
# Generate dictionary of frames
d = self._frames
# Merge duplicate frames
for frameid in self._frames.keys():
fs = self._frames[frameid]
if len(fs) > 1:
d[frameid] = fs[0]._merge(fs)
self._prepare_frames_hook()
# Convert frames
newframes = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
try:
newframes.append(frame._to_version(self.version))
except IncompatibleFrameError:
warn("{0}: Ignoring incompatible frame".format(frameid),
FrameWarning)
except ValueError as e:
warn("{0}: Ignoring invalid frame ({1})".format(frameid, e),
FrameWarning)
# Sort frames
newframes.sort(key=self.frame_order.key)
return newframes
class Tag22(Tag):
    """An ID3v2.2 tag: three-character frame ids with 24-bit frame sizes."""
    version = 2
    encodings = ("latin-1", "utf-16")

    def __init__(self):
        super().__init__()

    title = property(*Tag._friendly_text_frame("TT2"))
    artist = property(*Tag._friendly_text_frame("TP1"))

    @property
    def date(self):
        """Release timestamp string assembled from the TYE/TDA/TIM frames."""
        fields = self._get_date("TYE", "TDA", "TIM")
        return self._friendly_date_string(*fields)

    @date.setter
    def date(self, value):
        self._validate_friendly_date(value)
        (year, month, day, hour, minute, second) = self._get_friendly_date(value)
        # v2.2 splits the timestamp across three frames; rewrite all of them.
        for f in "TYE", "TDA", "TIM":
            if f in self:
                del self[f]
        if year is not None:
            self["TYE"] = "{0:04}".format(year)
        if month is not None and day is not None:
            self["TDA"] = "{0:02}{1:02}".format(month, day)
        if hour is not None and minute is not None:
            self["TIM"] = "{0:02}{1:02}".format(hour, minute)

    album_artist = property(*Tag._friendly_text_frame("TP2"))
    album = property(*Tag._friendly_text_frame("TAL"))
    track = property(*Tag._friendly_track("TRK", "track_total"))
    track_total = property(*Tag._friendly_track_total("TRK", "track"))
    disc = property(*Tag._friendly_track("TPA", "disc_total"))
    disc_total = property(*Tag._friendly_track_total("TPA", "disc"))
    composer = property(*Tag._friendly_text_frame("TCM"))
    genre = property(*Tag._friendly_text_frame("TCO"))
    comment = property(*Tag._friendly_comment("COM"))
    grouping = property(*Tag._friendly_text_frame("TT1"))
    # TODO: compilation
    picture = property(*Tag._friendly_picture("PIC"))
    sort_title = property(*Tag._friendly_text_frame("TST"))
    sort_artist = property(*Tag._friendly_text_frame("TSP"))
    sort_album_artist = property(*Tag._friendly_text_frame("TS2"))
    sort_album = property(*Tag._friendly_text_frame("TSA"))
    sort_composer = property(*Tag._friendly_text_frame("TSC"))

    def _read_header(self, file):
        """Parse the 10-byte ID3v2.2 header; raise NoTagError if absent."""
        self.offset = file.tell()
        header = fileutil.xread(file, 10)
        if header[0:5] != b"ID3\x02\x00":
            raise NoTagError("ID3v2.2 header not found")
        if header[5] & 0x80:
            self.flags.add("unsynchronised")
        if header[5] & 0x40:  # Compression bit is ill-defined in standard
            raise TagError("ID3v2.2 tag compression is not supported")
        if header[5] & 0x3F:
            warn("Unknown ID3v2.2 flags", TagWarning)
        self.size = Syncsafe.decode(header[6:10]) + 10

    def _read_frames(self, file):
        """Yield (frameid, flags, data) triples for each raw frame."""
        if "unsynchronised" in self.flags:
            ufile = UnsyncReader(file)
        else:
            ufile = file
        while file.tell() < self.offset + self.size:
            header = fileutil.xread(ufile, 6)
            if not self._is_frame_id(header[0:3]):
                break  # reached padding or garbage
            frameid = header[0:3].decode("ASCII")
            size = Int8.decode(header[3:6])
            data = fileutil.xread(ufile, size)
            yield (frameid, None, data)

    def _interpret_frame_flags(self, frameid, bflags, data):
        # No frame flags in v2.2
        return (None, data)

    def __encode_one_frame(self, frame):
        """Encode a single frame as 6-byte header + payload."""
        framedata = frame._encode(encodings=self.encodings)
        data = bytearray()
        # Frame id
        if len(frame.frameid) != 3 or not self._is_frame_id(frame.frameid):
            # BUG FIX: was `raise "..."` -- raising a str is a TypeError in
            # Python 3; raise ValueError as Tag23 does.
            raise ValueError("Invalid ID3v2.2 frame id {0}".format(repr(frame.frameid)))
        data.extend(frame.frameid.encode("ASCII"))
        # Size
        data.extend(Int8.encode(len(framedata), width=3))
        assert len(data) == 6
        data.extend(framedata)
        return data

    def _prepare_frames_hook(self):
        """Collapse multi-valued text frames: v2.2 stores a single string."""
        for frameid in self._frames.keys():
            for frame in self._frames[frameid]:
                if isinstance(frame, Frames.TextFrame):
                    # ID3v2.2 doesn't support multiple text values
                    if len(frame.text) > 1:
                        warn("{0}: merged multiple text strings into one value"
                             .format(frame.frameid), FrameWarning)
                        frame.text = [" / ".join(frame.text)]

    def encode(self, size_hint=None):
        """Serialize the tag to bytes; returns b"" when there are no frames."""
        if len(self) == 0:  # No frames -> no tag
            return b""
        frames = self._prepare_frames()
        framedata = bytearray().join(self.__encode_one_frame(frame)
                                     for frame in frames)
        if "unsynchronised" in self.flags:
            framedata = Unsync.encode(framedata)
        size = self._get_size_with_padding(size_hint, len(framedata) + 10)
        data = bytearray()
        data.extend(b"ID3\x02\x00")
        data.append(0x80 if "unsynchronised" in self.flags else 0x00)
        data.extend(Syncsafe.encode(size - 10, width=4))
        assert len(data) == 10
        data.extend(framedata)
        if size > len(data):
            data.extend(b"\x00" * (size - len(data)))
        assert len(data) == size
        return data
class Tag23(Tag):
    """An ID3v2.3 tag: four-character frame ids with 32-bit frame sizes."""
    version = 3
    encodings = ("latin-1", "utf-16")

    def __init__(self):
        super().__init__()

    title = property(*Tag._friendly_text_frame("TIT2"))
    artist = property(*Tag._friendly_text_frame("TPE1"))

    @property
    def date(self):
        """Release timestamp string assembled from the TYER/TDAT/TIME frames."""
        fields = self._get_date("TYER", "TDAT", "TIME")
        return self._friendly_date_string(*fields)

    @date.setter
    def date(self, value):
        self._validate_friendly_date(value)
        (year, month, day, hour, minute, second) = self._get_friendly_date(value)
        # v2.3 splits the timestamp across three frames; rewrite all of them.
        for f in "TYER", "TDAT", "TIME":
            if f in self:
                del self[f]
        if year is not None:
            self["TYER"] = "{0:04}".format(year)
        if month is not None and day is not None:
            self["TDAT"] = "{0:02}{1:02}".format(month, day)
        if hour is not None and minute is not None:
            self["TIME"] = "{0:02}{1:02}".format(hour, minute)

    album_artist = property(*Tag._friendly_text_frame("TPE2"))
    album = property(*Tag._friendly_text_frame("TALB"))
    track = property(*Tag._friendly_track("TRCK", "track_total"))
    track_total = property(*Tag._friendly_track_total("TRCK", "track"))
    disc = property(*Tag._friendly_track("TPOS", "disc_total"))
    disc_total = property(*Tag._friendly_track_total("TPOS", "disc"))
    composer = property(*Tag._friendly_text_frame("TCOM"))
    genre = property(*Tag._friendly_text_frame("TCON"))
    comment = property(*Tag._friendly_comment("COMM"))
    grouping = property(*Tag._friendly_text_frame("TIT1"))
    # TODO: compilation
    picture = property(*Tag._friendly_picture("APIC"))
    sort_title = property(*Tag._friendly_text_frame("TSOT"))
    sort_artist = property(*Tag._friendly_text_frame("TSOP"))
    sort_album_artist = property(*Tag._friendly_text_frame("TSO2"))
    sort_album = property(*Tag._friendly_text_frame("TSOA"))
    sort_composer = property(*Tag._friendly_text_frame("TSOC"))

    def _read_header(self, file):
        """Parse the 10-byte ID3v2.3 header (and extended header, if any)."""
        self.offset = file.tell()
        header = fileutil.xread(file, 10)
        if header[0:5] != b"ID3\x03\x00":
            raise NoTagError("ID3v2.3 header not found")
        if header[5] & 0x80:
            self.flags.add("unsynchronised")
        if header[5] & 0x40:
            self.flags.add("extended_header")
        if header[5] & 0x20:
            self.flags.add("experimental")
        if header[5] & 0x1F:
            warn("Unknown ID3v2.3 flags", TagWarning)
        self.size = Syncsafe.decode(header[6:10]) + 10
        if "extended_header" in self.flags:
            self.__read_extended_header(file)

    def __read_extended_header(self, file):
        """Consume the v2.3 extended header, recording padding size and CRC."""
        (size, ext_flags, self.padding_size) = \
            struct.unpack("!IHI", fileutil.xread(file, 10))
        if size != 6 and size != 10:
            warn("Unexpected size of ID3v2.3 extended header: {0}".format(size),
                 TagWarning)
        if ext_flags & 32768:
            if size < 10:
                warn("Extended header is too short for a CRC field: {0} bytes instead of 10".format(size),
                     TagWarning)
            else:
                self.flags.add("ext:crc_present")
                (self.crc32,) = struct.unpack("!I", fileutil.xread(file, 4))
                size -= 6
        # Skip any extension data beyond the fields handled above.
        # NOTE(review): the size bookkeeping looks off by a few bytes for
        # nonstandard sizes (size is only decremented on the CRC path) --
        # verify against files with oversized extended headers.
        if size > 6:
            fileutil.xread(file, size - 6)

    def _read_frames(self, file):
        """Yield (frameid, flagbyte, data) triples for each raw frame."""
        if "unsynchronised" in self.flags:
            ufile = UnsyncReader(file)
        else:
            ufile = file
        while file.tell() < self.offset + self.size:
            header = fileutil.xread(ufile, 10)
            if not self._is_frame_id(header[0:4]):
                break  # reached padding or garbage
            frameid = header[0:4].decode("ASCII")
            size = Int8.decode(header[4:8])
            bflags = Int8.decode(header[8:10])
            data = fileutil.xread(ufile, size)
            yield (frameid, bflags, data)

    def _interpret_frame_flags(self, frameid, bflags, data):
        """Decode the v2.3 frame flag bytes; return (flags, payload)."""
        flags = set()
        # Frame encoding flags
        if bflags & _FRAME23_FORMAT_UNKNOWN_MASK:
            # BUG FIX: the message used placeholder {0} twice, which tried to
            # hex-format the frame id *string* and raised a formatting error
            # instead of this FrameError; use {1:X} for the flag byte.
            raise FrameError("{0}: Invalid ID3v2.3 frame encoding flags: 0x{1:X}".format(frameid, bflags))
        if bflags & _FRAME23_FORMAT_COMPRESSED:
            flags.add("compressed")
            expanded_size = Int8.decode(data[0:4])  # informational only
            data = zlib.decompress(data[4:])
        if bflags & _FRAME23_FORMAT_ENCRYPTED:
            raise FrameError("{0}: Can't read ID3v2.3 encrypted frames".format(frameid))
        if bflags & _FRAME23_FORMAT_GROUP:
            flags.add("group")
            flags.add("group={0}".format(data[0]))  # Hack
            data = data[1:]
        # Frame status messages
        if bflags & _FRAME23_STATUS_DISCARD_ON_TAG_ALTER:
            flags.add("discard_on_tag_alter")
        if bflags & _FRAME23_STATUS_DISCARD_ON_FILE_ALTER:
            flags.add("discard_on_file_alter")
        if bflags & _FRAME23_STATUS_READ_ONLY:
            flags.add("read_only")
        if bflags & _FRAME23_STATUS_UNKNOWN_MASK:
            warn("{0}: Unexpected ID3v2.3 frame status flags: 0x{1:X}".format(frameid, bflags),
                 TagWarning)
        return flags, data

    def __encode_one_frame(self, frame):
        """Encode a single frame as 10-byte header + format info + payload."""
        framedata = frame._encode(encodings=self.encodings)
        origlen = len(framedata)
        flagval = 0
        frameinfo = bytearray()
        if "compressed" in frame.flags:
            framedata = zlib.compress(framedata)
            flagval |= _FRAME23_FORMAT_COMPRESSED
            # Compressed frames carry the uncompressed size up front.
            frameinfo.extend(Int8.encode(origlen, width=4))
        if "group" in frame.flags:
            grp = 0
            for flag in frame.flags:
                if flag.startswith("group="):
                    grp = int(flag[6:])
            frameinfo.append(grp)
            flagval |= _FRAME23_FORMAT_GROUP
        if "discard_on_tag_alter" in frame.flags:
            flagval |= _FRAME23_STATUS_DISCARD_ON_TAG_ALTER
        if "discard_on_file_alter" in frame.flags:
            flagval |= _FRAME23_STATUS_DISCARD_ON_FILE_ALTER
        if "read_only" in frame.flags:
            flagval |= _FRAME23_STATUS_READ_ONLY
        data = bytearray()
        # Frame id
        if len(frame.frameid) != 4 or not self._is_frame_id(frame.frameid.encode("ASCII")):
            raise ValueError("Invalid ID3v2.3 frame id {0}".format(repr(frame.frameid)))
        data.extend(frame.frameid.encode("ASCII"))
        # Size
        data.extend(Int8.encode(len(frameinfo) + len(framedata), width=4))
        # Flags
        data.extend(Int8.encode(flagval, width=2))
        assert len(data) == 10
        # Format info
        data.extend(frameinfo)
        # Frame data
        data.extend(framedata)
        return data

    def encode(self, size_hint=None):
        """Serialize the tag to bytes; returns b"" when there are no frames."""
        if len(self) == 0:  # No frames -> no tag
            return b""
        frames = self._prepare_frames()
        framedata = bytearray().join(self.__encode_one_frame(frame)
                                     for frame in frames)
        if "unsynchronised" in self.flags:
            framedata = Unsync.encode(framedata)
        size = self._get_size_with_padding(size_hint, len(framedata) + 10)
        data = bytearray()
        data.extend(b"ID3\x03\x00")
        flagval = 0x00
        if "unsynchronised" in self.flags:
            flagval |= 0x80
        data.append(flagval)
        data.extend(Syncsafe.encode(size - 10, width=4))
        assert len(data) == 10
        data.extend(framedata)
        if size > len(data):
            data.extend(b"\x00" * (size - len(data)))
        assert len(data) == size
        return data
class Tag24(Tag):
    """An ID3v2.4 tag: syncsafe frame sizes and per-frame unsynchronisation."""

    # Older iTunes versions wrote plain 8-bit frame sizes instead of the
    # syncsafe values the spec requires; set True to force 8-bit reading
    # (see _read_frames).
    ITUNES_WORKAROUND = False

    version = 4
    encodings = ("latin-1", "utf-8")

    def __init__(self):
        super().__init__()

    title = property(*Tag._friendly_text_frame("TIT2"))
    artist = property(*Tag._friendly_text_frame("TPE1"))

    @property
    def date(self):
        """Release timestamp; stored whole in the single TDRC frame in v2.4."""
        try:
            frame = self["TDRC"]
        except KeyError:
            return ""
        else:
            return frame.text[0]

    @date.setter
    def date(self, value):
        self._validate_friendly_date(value)
        fields = self._get_friendly_date(value)
        val = self._friendly_date_string(*fields)
        if val:
            self["TDRC"] = val
        elif "TDRC" in self:
            del self["TDRC"]

    album = property(*Tag._friendly_text_frame("TALB"))
    album_artist = property(*Tag._friendly_text_frame("TPE2"))
    track = property(*Tag._friendly_track("TRCK", "track_total"))
    track_total = property(*Tag._friendly_track_total("TRCK", "track"))
    disc = property(*Tag._friendly_track("TPOS", "disc_total"))
    disc_total = property(*Tag._friendly_track_total("TPOS", "disc"))
    composer = property(*Tag._friendly_text_frame("TCOM"))
    genre = property(*Tag._friendly_text_frame("TCON"))
    comment = property(*Tag._friendly_comment("COMM"))
    grouping = property(*Tag._friendly_text_frame("TIT1"))
    # TODO: compilation
    picture = property(*Tag._friendly_picture("APIC"))
    sort_title = property(*Tag._friendly_text_frame("TSOT"))
    sort_artist = property(*Tag._friendly_text_frame("TSOP"))
    sort_album_artist = property(*Tag._friendly_text_frame("TSO2"))
    sort_album = property(*Tag._friendly_text_frame("TSOA"))
    sort_composer = property(*Tag._friendly_text_frame("TSOC"))

    def _read_header(self, file):
        """Parse the 10-byte ID3v2.4 header (and extended header, if any)."""
        self.offset = file.tell()
        header = fileutil.xread(file, 10)
        if header[0:5] != b"ID3\x04\x00":
            raise NoTagError("ID3v2 header not found")
        if header[5] & _TAG24_UNSYNCHRONISED:
            self.flags.add("unsynchronised")
        if header[5] & _TAG24_EXTENDED_HEADER:
            self.flags.add("extended_header")
        if header[5] & _TAG24_EXPERIMENTAL:
            self.flags.add("experimental")
        if header[5] & _TAG24_FOOTER:
            self.flags.add("footer")
        if header[5] & _TAG24_UNKNOWN_MASK:
            warn("Unknown ID3v2.4 flags", TagWarning)
        self.size = (Syncsafe.decode(header[6:10]) + 10
                     + (10 if "footer" in self.flags else 0))
        if "extended_header" in self.flags:
            self.__read_extended_header(file)

    def __read_extended_header_flag_data(self, data):
        """Split one length-prefixed extended-header field off *data*."""
        # 1-byte length + data
        length = data[0]
        if length & 128:
            raise TagError("Invalid size of extended header field")
        return (data[1:1+length], data[1+length:])

    def __read_extended_header(self, file):
        """Consume the v2.4 extended header, recording CRC and restrictions."""
        size = Syncsafe.decode(fileutil.xread(file, 4))
        if size < 6:
            warn("Unexpected size of ID3v2.4 extended header: {0}".format(size),
                 TagWarning)
        data = fileutil.xread(file, size - 4)
        numflags = data[0]
        if numflags != 1:
            warn("Unexpected number of ID3v2.4 extended flag bytes: {0}"
                 .format(numflags),
                 TagWarning)
        flags = data[1]
        data = data[1+numflags:]
        if flags & 0x40:
            self.flags.add("ext:update")
            (dummy, data) = self.__read_extended_header_flag_data(data)
        if flags & 0x20:
            self.flags.add("ext:crc_present")
            (self.crc32, data) = self.__read_extended_header_flag_data(data)
            self.crc32 = Syncsafe.decode(self.crc32)
        if flags & 0x10:
            self.flags.add("ext:restrictions")
            (self.restrictions, data) = self.__read_extended_header_flag_data(data)

    def _read_frames(self, file, syncsafe_workaround=None):
        """Return a list of (frameid, flagbyte, data) triples for raw frames."""
        # Older versions of iTunes stored frame sizes as straight 8bit integers,
        # not syncsafe values as the spec requires.
        # (The bug is known to be fixed in iTunes 8.2.)
        #
        # To work around such an erroneous encoding, we re-read the entire tag
        # in non-syncsafe mode when we detect a frame with a bad size.
        # This heuristic does not detect all badly encoded tags;
        # it fails when the 8-bit frame size happens to be in syncsafe format.
        #
        # We could improve detection by parsing the tag both ways and see which
        # interpretation produces more frames. However, the extra effort doesn't
        # seem worthwhile to do by default.
        #
        # If you have many files with iTunes-encoded tags, you can force stagger
        # to read them in non-syncsafe mode setting the ITUNES_WORKAROUND
        # class attribute to True and let stagger reencode your tags. (Stagger
        # will never produce a 2.4 tag with non-syncsafe frame lengths.)
        if syncsafe_workaround is None:
            syncsafe_workaround = self.ITUNES_WORKAROUND
        origfpos = file.tell()
        frames = []
        while file.tell() < self.offset + self.size:
            header = fileutil.xread(file, 10)
            if not self._is_frame_id(header[0:4]):
                break  # reached padding or garbage
            frameid = header[0:4].decode("ASCII")
            if syncsafe_workaround:
                size = Int8.decode(header[4:8])
            else:
                try:
                    size = Syncsafe.decode(header[4:8])
                except ValueError:
                    # Bad syncsafe size: assume the iTunes bug and re-read the
                    # whole tag in 8-bit mode from the start.  (The dead
                    # `if syncsafe_workaround: raise` that used to live here
                    # was unreachable in this branch and has been removed.)
                    warn("Invalid syncsafe frame size; switching to 8-bit mode",
                         TagWarning)
                    file.seek(origfpos)
                    return self._read_frames(file, True)
            bflags = Int8.decode(header[8:10])
            data = fileutil.xread(file, size)
            frames.append((frameid, bflags, data))
        return frames

    def _interpret_frame_flags(self, frameid, bflags, data):
        """Decode the v2.4 frame flag bytes; return (flags, payload)."""
        flags = set()
        # Frame format flags
        if bflags & _FRAME24_FORMAT_UNKNOWN_MASK:
            raise FrameError("{0}: Unknown frame encoding flags: 0x{1:X}".format(frameid, bflags))
        if bflags & _FRAME24_FORMAT_GROUP:
            flags.add("group")
            flags.add("group={0}".format(data[0]))  # hack
            data = data[1:]
        if bflags & _FRAME24_FORMAT_COMPRESSED:
            flags.add("compressed")
        if bflags & _FRAME24_FORMAT_ENCRYPTED:
            raise FrameError("{0}: Can't read encrypted frames".format(frameid))
        if bflags & _FRAME24_FORMAT_UNSYNCHRONISED:
            flags.add("unsynchronised")
        expanded_size = len(data)  # informational only
        if bflags & _FRAME24_FORMAT_DATA_LENGTH_INDICATOR:
            flags.add("data_length_indicator")
            expanded_size = Syncsafe.decode(data[0:4])
            data = data[4:]
        # BUG FIX: both checks below consulted self.flags (the tag-level flag
        # set).  "compressed" is only ever added to the per-frame *flags* set
        # above, so frame decompression never ran; likewise, a frame marked
        # unsynchronised without the tag-level flag was never un-unsynced.
        if "unsynchronised" in self.flags or "unsynchronised" in flags:
            data = Unsync.decode(data)
        if "compressed" in flags:
            data = zlib.decompress(data)
        # Frame status flags
        if bflags & _FRAME24_STATUS_DISCARD_ON_TAG_ALTER:
            flags.add("discard_on_tag_alter")
        if bflags & _FRAME24_STATUS_DISCARD_ON_FILE_ALTER:
            flags.add("discard_on_file_alter")
        if bflags & _FRAME24_STATUS_READ_ONLY:
            flags.add("read_only")
        if bflags & _FRAME24_STATUS_UNKNOWN_MASK:
            warn("{0}: Unexpected status flags: 0x{1:X}"
                 .format(frameid, bflags), FrameWarning)
        return flags, data

    def __encode_one_frame(self, frame):
        """Encode a single frame as 10-byte header + format info + payload."""
        framedata = frame._encode(encodings=self.encodings)
        origlen = len(framedata)
        flagval = 0
        frameinfo = bytearray()
        if "group" in frame.flags:
            grp = 0
            for flag in frame.flags:
                if flag.startswith("group="):
                    grp = int(flag[6:])
            frameinfo.append(grp)
            flagval |= _FRAME24_FORMAT_GROUP
        if "compressed" in frame.flags:
            frame.flags.add("data_length_indicator")
            framedata = zlib.compress(framedata)
            flagval |= _FRAME24_FORMAT_COMPRESSED
        if "unsynchronised" in frame.flags:
            frame.flags.add("data_length_indicator")
            framedata = Unsync.encode(framedata)
            flagval |= _FRAME24_FORMAT_UNSYNCHRONISED
        if "data_length_indicator" in frame.flags:
            frameinfo.extend(Syncsafe.encode(origlen, width=4))
            flagval |= _FRAME24_FORMAT_DATA_LENGTH_INDICATOR
        if "discard_on_tag_alter" in frame.flags:
            flagval |= _FRAME24_STATUS_DISCARD_ON_TAG_ALTER
        if "discard_on_file_alter" in frame.flags:
            flagval |= _FRAME24_STATUS_DISCARD_ON_FILE_ALTER
        if "read_only" in frame.flags:
            flagval |= _FRAME24_STATUS_READ_ONLY
        data = bytearray()
        # Frame id
        if len(frame.frameid) != 4 or not self._is_frame_id(frame.frameid):
            # BUG FIX: was `raise "..."` -- raising a str is a TypeError in
            # Python 3; raise ValueError as Tag23 does.
            raise ValueError("{0}: Invalid frame id".format(repr(frame.frameid)))
        data.extend(frame.frameid.encode("ASCII"))
        # Size
        data.extend(Syncsafe.encode(len(frameinfo) + len(framedata), width=4))
        # Flags
        data.extend(Int8.encode(flagval, width=2))
        assert len(data) == 10
        # Format info
        data.extend(frameinfo)
        # Frame data
        data.extend(framedata)
        return data

    def encode(self, size_hint=None):
        """Serialize the tag to bytes; returns b"" when there are no frames."""
        if len(self) == 0:  # No frames -> no tag
            return b""
        frames = self._prepare_frames()
        if "unsynchronised" in self.flags:
            # v2.4 applies unsynchronisation per frame, not to the whole tag.
            for frame in frames:
                frame.flags.add("unsynchronised")
        framedata = bytearray().join(self.__encode_one_frame(frame)
                                     for frame in frames)
        size = self._get_size_with_padding(size_hint, len(framedata) + 10)
        data = bytearray()
        data.extend(b"ID3\x04\x00")
        flagval = 0x00
        if "unsynchronised" in self.flags:
            flagval |= 0x80
        data.append(flagval)
        data.extend(Syncsafe.encode(size - 10, width=4))
        assert len(data) == 10
        data.extend(framedata)
        if size > len(data):
            data.extend(b"\x00" * (size - len(data)))
        assert len(data) == size
        return data
# Maps the ID3v2 minor version number to the Tag subclass implementing it;
# used to dispatch when reading a tag of a detected version.
_tag_versions = {
    2: Tag22,
    3: Tag23,
    4: Tag24,
    }
|
staggerpkg/stagger | stagger/tags.py | FrameOrder.key | python | def key(self, frame):
"Return the sort key for the given frame."
def keytuple(primary):
if frame.frameno is None:
return (primary, 1)
return (primary, 0, frame.frameno)
# Look up frame by exact match
if type(frame) in self.frame_keys:
return keytuple(self.frame_keys[type(frame)])
# Look up parent frame for v2.2 frames
if frame._in_version(2) and type(frame).__bases__[0] in self.frame_keys:
return keytuple(self.frame_keys[type(frame).__bases__[0]])
# Try each pattern
for (pattern, key) in self.re_keys:
if re.match(pattern, frame.frameid):
return keytuple(key)
return keytuple(self.unknown_key) | Return the sort key for the given frame. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/tags.py#L191-L211 | [
"def keytuple(primary):\n if frame.frameno is None:\n return (primary, 1)\n return (primary, 0, frame.frameno)\n"
] | class FrameOrder:
"""Order frames based on their position in a predefined list of patterns,
and their original position in the source tag.
A pattern may be a frame class, or a regular expression that is to be
matched against the frame id.
>>> order = FrameOrder(TIT1, "T.*", TXXX)
>>> order.key(TIT1())
(0, 1)
>>> order.key(TPE1())
(1, 1)
>>> order.key(TXXX())
(2, 1)
>>> order.key(APIC())
(3, 1)
>>> order.key(APIC(frameno=3))
(3, 0, 3)
"""
def __init__(self, *patterns):
self.re_keys = []
self.frame_keys = dict()
i = -1
for (i, pattern) in zip(range(len(patterns)), patterns):
if isinstance(pattern, str):
self.re_keys.append((pattern, i))
else:
assert issubclass(pattern, Frames.Frame)
self.frame_keys[pattern] = i
self.unknown_key = i + 1
def __repr__(self):
order = []
order.extend((repr(pair[0]), pair[1]) for pair in self.re_keys)
order.extend((cls.__name__, self.frame_keys[cls])
for cls in self.frame_keys)
order.sort(key=lambda pair: pair[1])
return "<FrameOrder: {0}>".format(", ".join(pair[0] for pair in order))
|
staggerpkg/stagger | stagger/tags.py | Tag.frames | python | def frames(self, key=None, orig_order=False):
if key is not None:
# If there are multiple frames, then they are already in original order.
key = self._normalize_key(key)
if len(self._frames[key]) == 0:
raise KeyError("Key not found: " + repr(key))
return self._frames[key]
frames = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
frames.append(frame)
if orig_order:
key = (lambda frame:
(0, frame.frameno)
if frame.frameno is not None
else (1,))
else:
key = self.frame_order.key
frames.sort(key=key)
return frames | Returns a list of frames in this tag.
If KEY is None, returns all frames in the tag; otherwise returns all frames
whose frameid matches KEY.
If ORIG_ORDER is True, then the frames are returned in their original order.
Otherwise the frames are sorted in canonical order according to the frame_order
field of this tag. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/tags.py#L233-L261 | null | class Tag(collections.MutableMapping, metaclass=abc.ABCMeta):
known_frames = { } # Maps known frameids to Frame class objects
frame_order = None # Initialized by stagger.id3
def __init__(self):
self.flags = set()
self._frames = dict()
self._filename = None
# Primary accessor (no magic)
def frames(self, key=None, orig_order=False):
"""Returns a list of frames in this tag.
If KEY is None, returns all frames in the tag; otherwise returns all frames
whose frameid matches KEY.
If ORIG_ORDER is True, then the frames are returned in their original order.
Otherwise the frames are sorted in canonical order according to the frame_order
field of this tag.
"""
if key is not None:
# If there are multiple frames, then they are already in original order.
key = self._normalize_key(key)
if len(self._frames[key]) == 0:
raise KeyError("Key not found: " + repr(key))
return self._frames[key]
frames = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
frames.append(frame)
if orig_order:
key = (lambda frame:
(0, frame.frameno)
if frame.frameno is not None
else (1,))
else:
key = self.frame_order.key
frames.sort(key=key)
return frames
# MutableMapping API
def __iter__(self):
for frameid in self._frames:
yield frameid
def __len__(self):
return sum(len(self._frames[l]) for l in self._frames)
def __eq__(self, other):
return (self.version == other.version
and self.flags == other.flags
and self._frames == other._frames)
def _normalize_key(self, key, unknown_ok=True):
"""Return the normalized version of KEY.
KEY may be a frameid (a string), or a Frame class object.
If KEY corresponds to a registered frameid, then that frameid is returned.
Otherwise, either KeyError is raised, or KEY is returned verbatim,
depending on the value of UNKNOWN_OK.
"""
if Frames.is_frame_class(key):
key = key.frameid
if isinstance(key, str):
if not self._is_frame_id(key):
raise KeyError("{0}: Invalid frame id".format(key))
if key not in self.known_frames:
if unknown_ok:
warn("{0}: Unknown frame id".format(key), UnknownFrameWarning)
else:
raise KeyError("{0}: Unknown frame id".format(key))
return key
# Mapping accessor (with extra magic, for convenience)
def __getitem__(self, key):
key = self._normalize_key(key)
fs = self.frames(key)
allow_duplicates = (key not in self.known_frames
or self.known_frames[key]._allow_duplicates)
if allow_duplicates:
return fs
if len(fs) > 1:
# Merge duplicates into one ephemeral frame, and return that.
# This may break users' expectations when they try to make changes
# to the attributes of the returned frame; however, I think
# sometimes returning a list, sometimes a single frame for the same
# frame id would be even worse.
fs = fs[0]._merge(fs)
assert len(fs) == 1
return fs[0]
def __setitem__(self, key, value):
key = self._normalize_key(key, unknown_ok=False)
if isinstance(value, self.known_frames[key]):
self._frames[key] = [value]
return
if self.known_frames[key]._allow_duplicates:
if not isinstance(value, collections.Iterable) or isinstance(value, str):
raise ValueError("{0} requires a list of frame values".format(key))
self._frames[key] = [val if isinstance(val, self.known_frames[key])
else self.known_frames[key](val)
for val in value]
else: # not _allow_duplicates
self._frames[key] = [self.known_frames[key](value)]
def __delitem__(self, key):
del self._frames[self._normalize_key(key)]
def values(self):
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
yield frame
# Friendly names API
_friendly_names = [ "title", "artist",
"date",
"album-artist", "album",
"track", "track-total",
"disc", "disc-total",
"grouping", "composer",
"genre",
"comment",
#"compilation",
"picture",
"sort-title", "sort-artist",
"sort-album-artist", "sort-album",
"sort-composer",
]
title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
date = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
genre = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
comment = abstractproperty(fget=lambda self: Non, fset=lambda self, value: None)
grouping = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
picture = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
def __friendly_text_collect(self, frameid):
"""Collect text values from all instances of FRAMEID into a single list.
Returns an empty list if there are no instances of FRAMEID with a text attribute.
"""
try:
return self[frameid].text
except (KeyError, AttributeError):
return []
@classmethod
def _friendly_text_frame(cls, frameid):
def getter(self):
return " / ".join(self.__friendly_text_collect(frameid))
def setter(self, value):
if isinstance(value, str):
if len(value):
# For non-empty strings, split value
self[frameid] = value.split(" / ")
elif frameid in self:
# For empty strings, delete frame
del self[frameid]
else:
self[frameid] = value
return (getter, setter)
@classmethod
def _friendly_track(cls, frameid, totalattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[0])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
total = getattr(self, totalattr)
if total > 0:
self[frameid] = "{0}/{1}".format(value, total)
elif value:
self[frameid] = str(value)
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_track_total(cls, frameid, trackattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[2])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
track = getattr(self, trackattr)
if value:
self[frameid] = "{0}/{1}".format(track, value)
elif track:
self[frameid] = str(track)
elif frameid in self:
del self[frameid]
return (getter, setter)
__date_pattern = re.compile(r"""(?x)\s*
((?P<year>[0-9]{4}) # YYYY
(-(?P<month>[01][0-9]) # -MM
(-(?P<day>[0-3][0-9]) # -DD
)?)?)?
[ T]?
((?P<hour>[0-2][0-9]) # HH
(:(?P<min>[0-6][0-9]) # :MM
(:(?P<sec>[0-6][0-9]) # :SS
)?)?)?\s*
""")
@classmethod
def _validate_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None or m.end() != len(string):
raise ValueError("date must be in 'YYYY-MM-DD HH:MM:SS' format")
@classmethod
def _get_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None:
return (None, None, None, None, None, None)
res = []
for field in ("year", "month", "day", "hour", "min", "sec"):
v = m.group(field)
res.append(int(v) if v is not None else None)
return res
def _get_date(self, yearframe, dateframe, timeframe):
year = month = day = hour = minute = second = None
# Parse year.
try:
year = int(self.__friendly_text_collect(yearframe)[0])
except (IndexError, ValueError):
pass
# Parse month and date.
try:
date = self.__friendly_text_collect(dateframe)[0]
m = re.match(r"\s*(?P<month>[01][0-9])\s*-?\s*(?P<day>[0-3][0-9])?\s*$",
date)
if m is not None:
month = int(m.group("month"))
day = int(m.group("day"))
except IndexError:
pass
# Parse time.
try:
time = self.__friendly_text_collect(timeframe)[0]
m = re.match(r"\s*(?P<hour>[0-2][0-9])\s*:?\s*"
"(?P<minute>[0-5][0-9])\s*:?\s*"
"(?P<second>[0-5][0-9])?\s*$", time)
if m is not None:
hour = int(m.group("hour"))
minute = int(m.group("minute"))
s = m.group("second")
second = int(s) if s is not None else None
except IndexError:
pass
return (year, month, day, hour, minute, second)
def _friendly_date_string(self, *fields):
seps = ("", "-", "-", " ", ":", ":")
formats = ("04", "02", "02", "02", "02", "02")
res = []
for i in range(len(fields)):
if fields[i] is None:
break
res.append(seps[i])
res.append("{0:{1}}".format(fields[i], formats[i]))
return "".join(res)
@classmethod
def _friendly_picture(cls, frameid):
def getter(self):
if frameid not in self:
return ""
else:
return ", ".join("{0}:{1}:<{2} bytes of {3} data>"
.format(f._spec("type").to_str(f.type),
f.desc,
len(f.data),
imghdr.what(None, f.data[:32]))
for f in self[frameid])
def setter(self, value):
if len(value) > 0:
self[frameid] = [self.known_frames[frameid](value=value)]
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_comment(cls, frameid):
def comment_frame_index(self):
if frameid not in self:
return None
# Return comment with lang="eng", desc="", if present.
# Otherwise return the first comment with no description,
# regardless of language.
icmt = None
for i in range(len(self[frameid])):
f = self[frameid][i]
if f.desc == "":
if f.lang == "eng":
return i
if icmt is None:
icmt = i
return icmt
def getter(self):
i = comment_frame_index(self)
if i is None:
return ""
else:
return self[frameid][i].text
def setter(self, value):
assert isinstance(value, str)
i = comment_frame_index(self)
if i is not None:
del self._frames[frameid][i]
if len(value) > 0:
frame = self.known_frames[frameid](lang="eng", desc="", text=value)
if frameid not in self._frames:
self._frames[frameid] = []
self._frames[frameid].append(frame)
return (getter, setter)
# Misc
def __repr__(self):
return "<{0}: ID3v2.{1} tag{2} with {3} frames>".format(
type(self).__name__,
self.version,
("({0})".format(", ".join(self.flags))
if len(self.flags) > 0 else ""),
len(self._frames))
# Reading tags
@classmethod
def read(cls, filename, offset=0):
"""Read an ID3v2 tag from a file."""
i = 0
with fileutil.opened(filename, "rb") as file:
file.seek(offset)
tag = cls()
tag._read_header(file)
for (frameid, bflags, data) in tag._read_frames(file):
if len(data) == 0:
warn("{0}: Ignoring empty frame".format(frameid),
EmptyFrameWarning)
else:
frame = tag._decode_frame(frameid, bflags, data, i)
if frame is not None:
l = tag._frames.setdefault(frame.frameid, [])
l.append(frame)
if file.tell() > tag.offset + tag.size:
break
i += 1
try:
tag._filename = file.name
except AttributeError:
pass
return tag
@classmethod
def decode(cls, data):
return cls.read(io.BytesIO(data))
def _decode_frame(self, frameid, bflags, data, frameno=None):
try:
(flags, data) = self._interpret_frame_flags(frameid, bflags, data)
if flags is None:
flags = set()
if frameid in self.known_frames:
return self.known_frames[frameid]._decode(frameid, data,
flags,
frameno=frameno)
else:
# Unknown frame
flags.add("unknown")
warn("{0}: Unknown frame".format(frameid), UnknownFrameWarning)
if frameid.startswith('T'): # Unknown text frame
return Frames.TextFrame._decode(frameid, data, flags,
frameno=frameno)
elif frameid.startswith('W'): # Unknown URL frame
return Frames.URLFrame._decode(frameid, data, flags,
frameno=frameno)
else:
return Frames.UnknownFrame._decode(frameid, data, flags,
frameno=frameno)
except (FrameError, ValueError, EOFError) as e:
warn("{0}: Invalid frame".format(frameid), ErrorFrameWarning)
return Frames.ErrorFrame(frameid, data, exception=e, frameno=frameno)
@abstractmethod
def _read_header(self, file): pass
@abstractmethod
def _read_frames(self, file): pass
@abstractmethod
def _interpret_frame_flags(self, frameid, bflags, data): pass
# Writing tags
def write(self, filename=None):
if not filename:
filename = self._filename
if not filename:
raise TypeError("invalid file: {0}".format(filename))
with fileutil.opened(filename, "rb+") as file:
try:
(offset, length) = detect_tag(file)[1:3]
except NoTagError:
(offset, length) = (0, 0)
if offset > 0:
delete_tag(file)
(offset, length) = (0, 0)
tag_data = self.encode(size_hint=length)
fileutil.replace_chunk(file, offset, length, tag_data)
@abstractmethod
def encode(self, size_hint=None):
pass
padding_default = 128
padding_max = 1024
def _get_size_with_padding(self, size_desired, size_actual):
size = size_actual
if (size_desired is not None and size < size_desired
and (self.padding_max is None or
size_desired - size_actual <= self.padding_max)):
size = size_desired
elif self.padding_default:
size += min(self.padding_default, self.padding_max)
return size
@staticmethod
def _is_frame_id(data):
if isinstance(data, str):
try:
data = data.encode("ASCII")
except UnicodeEncodeError:
return false
# Allow a single space at end of four-character ids
# Some programs (e.g. iTunes 8.2) generate such frames when converting
# from 2.2 to 2.3/2.4 tags.
pattern = re.compile(b"^[A-Z][A-Z0-9]{2}[A-Z0-9 ]?$")
return pattern.match(data)
def _prepare_frames_hook(self):
pass
def _prepare_frames(self):
# Generate dictionary of frames
d = self._frames
# Merge duplicate frames
for frameid in self._frames.keys():
fs = self._frames[frameid]
if len(fs) > 1:
d[frameid] = fs[0]._merge(fs)
self._prepare_frames_hook()
# Convert frames
newframes = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
try:
newframes.append(frame._to_version(self.version))
except IncompatibleFrameError:
warn("{0}: Ignoring incompatible frame".format(frameid),
FrameWarning)
except ValueError as e:
warn("{0}: Ignoring invalid frame ({1})".format(frameid, e),
FrameWarning)
# Sort frames
newframes.sort(key=self.frame_order.key)
return newframes
|
staggerpkg/stagger | stagger/tags.py | Tag._normalize_key | python | def _normalize_key(self, key, unknown_ok=True):
if Frames.is_frame_class(key):
key = key.frameid
if isinstance(key, str):
if not self._is_frame_id(key):
raise KeyError("{0}: Invalid frame id".format(key))
if key not in self.known_frames:
if unknown_ok:
warn("{0}: Unknown frame id".format(key), UnknownFrameWarning)
else:
raise KeyError("{0}: Unknown frame id".format(key))
return key | Return the normalized version of KEY.
KEY may be a frameid (a string), or a Frame class object.
If KEY corresponds to a registered frameid, then that frameid is returned.
Otherwise, either KeyError is raised, or KEY is returned verbatim,
depending on the value of UNKNOWN_OK. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/tags.py#L276-L293 | null | class Tag(collections.MutableMapping, metaclass=abc.ABCMeta):
known_frames = { } # Maps known frameids to Frame class objects
frame_order = None # Initialized by stagger.id3
def __init__(self):
self.flags = set()
self._frames = dict()
self._filename = None
# Primary accessor (no magic)
def frames(self, key=None, orig_order=False):
"""Returns a list of frames in this tag.
If KEY is None, returns all frames in the tag; otherwise returns all frames
whose frameid matches KEY.
If ORIG_ORDER is True, then the frames are returned in their original order.
Otherwise the frames are sorted in canonical order according to the frame_order
field of this tag.
"""
if key is not None:
# If there are multiple frames, then they are already in original order.
key = self._normalize_key(key)
if len(self._frames[key]) == 0:
raise KeyError("Key not found: " + repr(key))
return self._frames[key]
frames = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
frames.append(frame)
if orig_order:
key = (lambda frame:
(0, frame.frameno)
if frame.frameno is not None
else (1,))
else:
key = self.frame_order.key
frames.sort(key=key)
return frames
# MutableMapping API
def __iter__(self):
for frameid in self._frames:
yield frameid
def __len__(self):
return sum(len(self._frames[l]) for l in self._frames)
def __eq__(self, other):
return (self.version == other.version
and self.flags == other.flags
and self._frames == other._frames)
# Mapping accessor (with extra magic, for convenience)
def __getitem__(self, key):
key = self._normalize_key(key)
fs = self.frames(key)
allow_duplicates = (key not in self.known_frames
or self.known_frames[key]._allow_duplicates)
if allow_duplicates:
return fs
if len(fs) > 1:
# Merge duplicates into one ephemeral frame, and return that.
# This may break users' expectations when they try to make changes
# to the attributes of the returned frame; however, I think
# sometimes returning a list, sometimes a single frame for the same
# frame id would be even worse.
fs = fs[0]._merge(fs)
assert len(fs) == 1
return fs[0]
def __setitem__(self, key, value):
key = self._normalize_key(key, unknown_ok=False)
if isinstance(value, self.known_frames[key]):
self._frames[key] = [value]
return
if self.known_frames[key]._allow_duplicates:
if not isinstance(value, collections.Iterable) or isinstance(value, str):
raise ValueError("{0} requires a list of frame values".format(key))
self._frames[key] = [val if isinstance(val, self.known_frames[key])
else self.known_frames[key](val)
for val in value]
else: # not _allow_duplicates
self._frames[key] = [self.known_frames[key](value)]
def __delitem__(self, key):
del self._frames[self._normalize_key(key)]
def values(self):
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
yield frame
# Friendly names API
_friendly_names = [ "title", "artist",
"date",
"album-artist", "album",
"track", "track-total",
"disc", "disc-total",
"grouping", "composer",
"genre",
"comment",
#"compilation",
"picture",
"sort-title", "sort-artist",
"sort-album-artist", "sort-album",
"sort-composer",
]
title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
date = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
genre = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
comment = abstractproperty(fget=lambda self: Non, fset=lambda self, value: None)
grouping = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
picture = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
def __friendly_text_collect(self, frameid):
"""Collect text values from all instances of FRAMEID into a single list.
Returns an empty list if there are no instances of FRAMEID with a text attribute.
"""
try:
return self[frameid].text
except (KeyError, AttributeError):
return []
@classmethod
def _friendly_text_frame(cls, frameid):
def getter(self):
return " / ".join(self.__friendly_text_collect(frameid))
def setter(self, value):
if isinstance(value, str):
if len(value):
# For non-empty strings, split value
self[frameid] = value.split(" / ")
elif frameid in self:
# For empty strings, delete frame
del self[frameid]
else:
self[frameid] = value
return (getter, setter)
@classmethod
def _friendly_track(cls, frameid, totalattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[0])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
total = getattr(self, totalattr)
if total > 0:
self[frameid] = "{0}/{1}".format(value, total)
elif value:
self[frameid] = str(value)
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_track_total(cls, frameid, trackattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[2])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
track = getattr(self, trackattr)
if value:
self[frameid] = "{0}/{1}".format(track, value)
elif track:
self[frameid] = str(track)
elif frameid in self:
del self[frameid]
return (getter, setter)
__date_pattern = re.compile(r"""(?x)\s*
((?P<year>[0-9]{4}) # YYYY
(-(?P<month>[01][0-9]) # -MM
(-(?P<day>[0-3][0-9]) # -DD
)?)?)?
[ T]?
((?P<hour>[0-2][0-9]) # HH
(:(?P<min>[0-6][0-9]) # :MM
(:(?P<sec>[0-6][0-9]) # :SS
)?)?)?\s*
""")
@classmethod
def _validate_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None or m.end() != len(string):
raise ValueError("date must be in 'YYYY-MM-DD HH:MM:SS' format")
@classmethod
def _get_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None:
return (None, None, None, None, None, None)
res = []
for field in ("year", "month", "day", "hour", "min", "sec"):
v = m.group(field)
res.append(int(v) if v is not None else None)
return res
def _get_date(self, yearframe, dateframe, timeframe):
year = month = day = hour = minute = second = None
# Parse year.
try:
year = int(self.__friendly_text_collect(yearframe)[0])
except (IndexError, ValueError):
pass
# Parse month and date.
try:
date = self.__friendly_text_collect(dateframe)[0]
m = re.match(r"\s*(?P<month>[01][0-9])\s*-?\s*(?P<day>[0-3][0-9])?\s*$",
date)
if m is not None:
month = int(m.group("month"))
day = int(m.group("day"))
except IndexError:
pass
# Parse time.
try:
time = self.__friendly_text_collect(timeframe)[0]
m = re.match(r"\s*(?P<hour>[0-2][0-9])\s*:?\s*"
"(?P<minute>[0-5][0-9])\s*:?\s*"
"(?P<second>[0-5][0-9])?\s*$", time)
if m is not None:
hour = int(m.group("hour"))
minute = int(m.group("minute"))
s = m.group("second")
second = int(s) if s is not None else None
except IndexError:
pass
return (year, month, day, hour, minute, second)
def _friendly_date_string(self, *fields):
seps = ("", "-", "-", " ", ":", ":")
formats = ("04", "02", "02", "02", "02", "02")
res = []
for i in range(len(fields)):
if fields[i] is None:
break
res.append(seps[i])
res.append("{0:{1}}".format(fields[i], formats[i]))
return "".join(res)
@classmethod
def _friendly_picture(cls, frameid):
def getter(self):
if frameid not in self:
return ""
else:
return ", ".join("{0}:{1}:<{2} bytes of {3} data>"
.format(f._spec("type").to_str(f.type),
f.desc,
len(f.data),
imghdr.what(None, f.data[:32]))
for f in self[frameid])
def setter(self, value):
if len(value) > 0:
self[frameid] = [self.known_frames[frameid](value=value)]
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_comment(cls, frameid):
def comment_frame_index(self):
if frameid not in self:
return None
# Return comment with lang="eng", desc="", if present.
# Otherwise return the first comment with no description,
# regardless of language.
icmt = None
for i in range(len(self[frameid])):
f = self[frameid][i]
if f.desc == "":
if f.lang == "eng":
return i
if icmt is None:
icmt = i
return icmt
def getter(self):
i = comment_frame_index(self)
if i is None:
return ""
else:
return self[frameid][i].text
def setter(self, value):
assert isinstance(value, str)
i = comment_frame_index(self)
if i is not None:
del self._frames[frameid][i]
if len(value) > 0:
frame = self.known_frames[frameid](lang="eng", desc="", text=value)
if frameid not in self._frames:
self._frames[frameid] = []
self._frames[frameid].append(frame)
return (getter, setter)
# Misc
def __repr__(self):
return "<{0}: ID3v2.{1} tag{2} with {3} frames>".format(
type(self).__name__,
self.version,
("({0})".format(", ".join(self.flags))
if len(self.flags) > 0 else ""),
len(self._frames))
# Reading tags
@classmethod
def read(cls, filename, offset=0):
"""Read an ID3v2 tag from a file."""
i = 0
with fileutil.opened(filename, "rb") as file:
file.seek(offset)
tag = cls()
tag._read_header(file)
for (frameid, bflags, data) in tag._read_frames(file):
if len(data) == 0:
warn("{0}: Ignoring empty frame".format(frameid),
EmptyFrameWarning)
else:
frame = tag._decode_frame(frameid, bflags, data, i)
if frame is not None:
l = tag._frames.setdefault(frame.frameid, [])
l.append(frame)
if file.tell() > tag.offset + tag.size:
break
i += 1
try:
tag._filename = file.name
except AttributeError:
pass
return tag
@classmethod
def decode(cls, data):
return cls.read(io.BytesIO(data))
def _decode_frame(self, frameid, bflags, data, frameno=None):
try:
(flags, data) = self._interpret_frame_flags(frameid, bflags, data)
if flags is None:
flags = set()
if frameid in self.known_frames:
return self.known_frames[frameid]._decode(frameid, data,
flags,
frameno=frameno)
else:
# Unknown frame
flags.add("unknown")
warn("{0}: Unknown frame".format(frameid), UnknownFrameWarning)
if frameid.startswith('T'): # Unknown text frame
return Frames.TextFrame._decode(frameid, data, flags,
frameno=frameno)
elif frameid.startswith('W'): # Unknown URL frame
return Frames.URLFrame._decode(frameid, data, flags,
frameno=frameno)
else:
return Frames.UnknownFrame._decode(frameid, data, flags,
frameno=frameno)
except (FrameError, ValueError, EOFError) as e:
warn("{0}: Invalid frame".format(frameid), ErrorFrameWarning)
return Frames.ErrorFrame(frameid, data, exception=e, frameno=frameno)
@abstractmethod
def _read_header(self, file): pass
@abstractmethod
def _read_frames(self, file): pass
@abstractmethod
def _interpret_frame_flags(self, frameid, bflags, data): pass
# Writing tags
def write(self, filename=None):
if not filename:
filename = self._filename
if not filename:
raise TypeError("invalid file: {0}".format(filename))
with fileutil.opened(filename, "rb+") as file:
try:
(offset, length) = detect_tag(file)[1:3]
except NoTagError:
(offset, length) = (0, 0)
if offset > 0:
delete_tag(file)
(offset, length) = (0, 0)
tag_data = self.encode(size_hint=length)
fileutil.replace_chunk(file, offset, length, tag_data)
@abstractmethod
def encode(self, size_hint=None):
pass
padding_default = 128
padding_max = 1024
def _get_size_with_padding(self, size_desired, size_actual):
size = size_actual
if (size_desired is not None and size < size_desired
and (self.padding_max is None or
size_desired - size_actual <= self.padding_max)):
size = size_desired
elif self.padding_default:
size += min(self.padding_default, self.padding_max)
return size
@staticmethod
def _is_frame_id(data):
if isinstance(data, str):
try:
data = data.encode("ASCII")
except UnicodeEncodeError:
return false
# Allow a single space at end of four-character ids
# Some programs (e.g. iTunes 8.2) generate such frames when converting
# from 2.2 to 2.3/2.4 tags.
pattern = re.compile(b"^[A-Z][A-Z0-9]{2}[A-Z0-9 ]?$")
return pattern.match(data)
def _prepare_frames_hook(self):
pass
def _prepare_frames(self):
# Generate dictionary of frames
d = self._frames
# Merge duplicate frames
for frameid in self._frames.keys():
fs = self._frames[frameid]
if len(fs) > 1:
d[frameid] = fs[0]._merge(fs)
self._prepare_frames_hook()
# Convert frames
newframes = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
try:
newframes.append(frame._to_version(self.version))
except IncompatibleFrameError:
warn("{0}: Ignoring incompatible frame".format(frameid),
FrameWarning)
except ValueError as e:
warn("{0}: Ignoring invalid frame ({1})".format(frameid, e),
FrameWarning)
# Sort frames
newframes.sort(key=self.frame_order.key)
return newframes
|
staggerpkg/stagger | stagger/tags.py | Tag.read | python | def read(cls, filename, offset=0):
i = 0
with fileutil.opened(filename, "rb") as file:
file.seek(offset)
tag = cls()
tag._read_header(file)
for (frameid, bflags, data) in tag._read_frames(file):
if len(data) == 0:
warn("{0}: Ignoring empty frame".format(frameid),
EmptyFrameWarning)
else:
frame = tag._decode_frame(frameid, bflags, data, i)
if frame is not None:
l = tag._frames.setdefault(frame.frameid, [])
l.append(frame)
if file.tell() > tag.offset + tag.size:
break
i += 1
try:
tag._filename = file.name
except AttributeError:
pass
return tag | Read an ID3v2 tag from a file. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/tags.py#L575-L598 | null | class Tag(collections.MutableMapping, metaclass=abc.ABCMeta):
known_frames = { } # Maps known frameids to Frame class objects
frame_order = None # Initialized by stagger.id3
def __init__(self):
self.flags = set()
self._frames = dict()
self._filename = None
# Primary accessor (no magic)
def frames(self, key=None, orig_order=False):
"""Returns a list of frames in this tag.
If KEY is None, returns all frames in the tag; otherwise returns all frames
whose frameid matches KEY.
If ORIG_ORDER is True, then the frames are returned in their original order.
Otherwise the frames are sorted in canonical order according to the frame_order
field of this tag.
"""
if key is not None:
# If there are multiple frames, then they are already in original order.
key = self._normalize_key(key)
if len(self._frames[key]) == 0:
raise KeyError("Key not found: " + repr(key))
return self._frames[key]
frames = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
frames.append(frame)
if orig_order:
key = (lambda frame:
(0, frame.frameno)
if frame.frameno is not None
else (1,))
else:
key = self.frame_order.key
frames.sort(key=key)
return frames
# MutableMapping API
def __iter__(self):
for frameid in self._frames:
yield frameid
def __len__(self):
return sum(len(self._frames[l]) for l in self._frames)
def __eq__(self, other):
return (self.version == other.version
and self.flags == other.flags
and self._frames == other._frames)
def _normalize_key(self, key, unknown_ok=True):
"""Return the normalized version of KEY.
KEY may be a frameid (a string), or a Frame class object.
If KEY corresponds to a registered frameid, then that frameid is returned.
Otherwise, either KeyError is raised, or KEY is returned verbatim,
depending on the value of UNKNOWN_OK.
"""
if Frames.is_frame_class(key):
key = key.frameid
if isinstance(key, str):
if not self._is_frame_id(key):
raise KeyError("{0}: Invalid frame id".format(key))
if key not in self.known_frames:
if unknown_ok:
warn("{0}: Unknown frame id".format(key), UnknownFrameWarning)
else:
raise KeyError("{0}: Unknown frame id".format(key))
return key
# Mapping accessor (with extra magic, for convenience)
def __getitem__(self, key):
key = self._normalize_key(key)
fs = self.frames(key)
allow_duplicates = (key not in self.known_frames
or self.known_frames[key]._allow_duplicates)
if allow_duplicates:
return fs
if len(fs) > 1:
# Merge duplicates into one ephemeral frame, and return that.
# This may break users' expectations when they try to make changes
# to the attributes of the returned frame; however, I think
# sometimes returning a list, sometimes a single frame for the same
# frame id would be even worse.
fs = fs[0]._merge(fs)
assert len(fs) == 1
return fs[0]
def __setitem__(self, key, value):
key = self._normalize_key(key, unknown_ok=False)
if isinstance(value, self.known_frames[key]):
self._frames[key] = [value]
return
if self.known_frames[key]._allow_duplicates:
if not isinstance(value, collections.Iterable) or isinstance(value, str):
raise ValueError("{0} requires a list of frame values".format(key))
self._frames[key] = [val if isinstance(val, self.known_frames[key])
else self.known_frames[key](val)
for val in value]
else: # not _allow_duplicates
self._frames[key] = [self.known_frames[key](value)]
def __delitem__(self, key):
del self._frames[self._normalize_key(key)]
def values(self):
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
yield frame
# Friendly names API
_friendly_names = [ "title", "artist",
"date",
"album-artist", "album",
"track", "track-total",
"disc", "disc-total",
"grouping", "composer",
"genre",
"comment",
#"compilation",
"picture",
"sort-title", "sort-artist",
"sort-album-artist", "sort-album",
"sort-composer",
]
title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
date = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
track_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
disc_total = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
genre = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
comment = abstractproperty(fget=lambda self: Non, fset=lambda self, value: None)
grouping = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
picture = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_title = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album_artist = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_album = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
sort_composer = abstractproperty(fget=lambda self: None, fset=lambda self, value: None)
def __friendly_text_collect(self, frameid):
"""Collect text values from all instances of FRAMEID into a single list.
Returns an empty list if there are no instances of FRAMEID with a text attribute.
"""
try:
return self[frameid].text
except (KeyError, AttributeError):
return []
@classmethod
def _friendly_text_frame(cls, frameid):
def getter(self):
return " / ".join(self.__friendly_text_collect(frameid))
def setter(self, value):
if isinstance(value, str):
if len(value):
# For non-empty strings, split value
self[frameid] = value.split(" / ")
elif frameid in self:
# For empty strings, delete frame
del self[frameid]
else:
self[frameid] = value
return (getter, setter)
@classmethod
def _friendly_track(cls, frameid, totalattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[0])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
total = getattr(self, totalattr)
if total > 0:
self[frameid] = "{0}/{1}".format(value, total)
elif value:
self[frameid] = str(value)
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_track_total(cls, frameid, trackattr):
def getter(self):
ts = self.__friendly_text_collect(frameid)
try:
return int(ts[0].partition("/")[2])
except (ValueError, IndexError):
return 0
def setter(self, value):
value = int(value)
track = getattr(self, trackattr)
if value:
self[frameid] = "{0}/{1}".format(track, value)
elif track:
self[frameid] = str(track)
elif frameid in self:
del self[frameid]
return (getter, setter)
__date_pattern = re.compile(r"""(?x)\s*
((?P<year>[0-9]{4}) # YYYY
(-(?P<month>[01][0-9]) # -MM
(-(?P<day>[0-3][0-9]) # -DD
)?)?)?
[ T]?
((?P<hour>[0-2][0-9]) # HH
(:(?P<min>[0-6][0-9]) # :MM
(:(?P<sec>[0-6][0-9]) # :SS
)?)?)?\s*
""")
@classmethod
def _validate_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None or m.end() != len(string):
raise ValueError("date must be in 'YYYY-MM-DD HH:MM:SS' format")
@classmethod
def _get_friendly_date(cls, string):
m = cls.__date_pattern.match(string)
if m is None:
return (None, None, None, None, None, None)
res = []
for field in ("year", "month", "day", "hour", "min", "sec"):
v = m.group(field)
res.append(int(v) if v is not None else None)
return res
def _get_date(self, yearframe, dateframe, timeframe):
year = month = day = hour = minute = second = None
# Parse year.
try:
year = int(self.__friendly_text_collect(yearframe)[0])
except (IndexError, ValueError):
pass
# Parse month and date.
try:
date = self.__friendly_text_collect(dateframe)[0]
m = re.match(r"\s*(?P<month>[01][0-9])\s*-?\s*(?P<day>[0-3][0-9])?\s*$",
date)
if m is not None:
month = int(m.group("month"))
day = int(m.group("day"))
except IndexError:
pass
# Parse time.
try:
time = self.__friendly_text_collect(timeframe)[0]
m = re.match(r"\s*(?P<hour>[0-2][0-9])\s*:?\s*"
"(?P<minute>[0-5][0-9])\s*:?\s*"
"(?P<second>[0-5][0-9])?\s*$", time)
if m is not None:
hour = int(m.group("hour"))
minute = int(m.group("minute"))
s = m.group("second")
second = int(s) if s is not None else None
except IndexError:
pass
return (year, month, day, hour, minute, second)
def _friendly_date_string(self, *fields):
seps = ("", "-", "-", " ", ":", ":")
formats = ("04", "02", "02", "02", "02", "02")
res = []
for i in range(len(fields)):
if fields[i] is None:
break
res.append(seps[i])
res.append("{0:{1}}".format(fields[i], formats[i]))
return "".join(res)
@classmethod
def _friendly_picture(cls, frameid):
def getter(self):
if frameid not in self:
return ""
else:
return ", ".join("{0}:{1}:<{2} bytes of {3} data>"
.format(f._spec("type").to_str(f.type),
f.desc,
len(f.data),
imghdr.what(None, f.data[:32]))
for f in self[frameid])
def setter(self, value):
if len(value) > 0:
self[frameid] = [self.known_frames[frameid](value=value)]
elif frameid in self:
del self[frameid]
return (getter, setter)
@classmethod
def _friendly_comment(cls, frameid):
def comment_frame_index(self):
if frameid not in self:
return None
# Return comment with lang="eng", desc="", if present.
# Otherwise return the first comment with no description,
# regardless of language.
icmt = None
for i in range(len(self[frameid])):
f = self[frameid][i]
if f.desc == "":
if f.lang == "eng":
return i
if icmt is None:
icmt = i
return icmt
def getter(self):
i = comment_frame_index(self)
if i is None:
return ""
else:
return self[frameid][i].text
def setter(self, value):
assert isinstance(value, str)
i = comment_frame_index(self)
if i is not None:
del self._frames[frameid][i]
if len(value) > 0:
frame = self.known_frames[frameid](lang="eng", desc="", text=value)
if frameid not in self._frames:
self._frames[frameid] = []
self._frames[frameid].append(frame)
return (getter, setter)
# Misc
def __repr__(self):
return "<{0}: ID3v2.{1} tag{2} with {3} frames>".format(
type(self).__name__,
self.version,
("({0})".format(", ".join(self.flags))
if len(self.flags) > 0 else ""),
len(self._frames))
# Reading tags
@classmethod
@classmethod
def decode(cls, data):
return cls.read(io.BytesIO(data))
def _decode_frame(self, frameid, bflags, data, frameno=None):
try:
(flags, data) = self._interpret_frame_flags(frameid, bflags, data)
if flags is None:
flags = set()
if frameid in self.known_frames:
return self.known_frames[frameid]._decode(frameid, data,
flags,
frameno=frameno)
else:
# Unknown frame
flags.add("unknown")
warn("{0}: Unknown frame".format(frameid), UnknownFrameWarning)
if frameid.startswith('T'): # Unknown text frame
return Frames.TextFrame._decode(frameid, data, flags,
frameno=frameno)
elif frameid.startswith('W'): # Unknown URL frame
return Frames.URLFrame._decode(frameid, data, flags,
frameno=frameno)
else:
return Frames.UnknownFrame._decode(frameid, data, flags,
frameno=frameno)
except (FrameError, ValueError, EOFError) as e:
warn("{0}: Invalid frame".format(frameid), ErrorFrameWarning)
return Frames.ErrorFrame(frameid, data, exception=e, frameno=frameno)
@abstractmethod
def _read_header(self, file): pass
@abstractmethod
def _read_frames(self, file): pass
@abstractmethod
def _interpret_frame_flags(self, frameid, bflags, data): pass
# Writing tags
def write(self, filename=None):
if not filename:
filename = self._filename
if not filename:
raise TypeError("invalid file: {0}".format(filename))
with fileutil.opened(filename, "rb+") as file:
try:
(offset, length) = detect_tag(file)[1:3]
except NoTagError:
(offset, length) = (0, 0)
if offset > 0:
delete_tag(file)
(offset, length) = (0, 0)
tag_data = self.encode(size_hint=length)
fileutil.replace_chunk(file, offset, length, tag_data)
@abstractmethod
def encode(self, size_hint=None):
pass
padding_default = 128
padding_max = 1024
def _get_size_with_padding(self, size_desired, size_actual):
size = size_actual
if (size_desired is not None and size < size_desired
and (self.padding_max is None or
size_desired - size_actual <= self.padding_max)):
size = size_desired
elif self.padding_default:
size += min(self.padding_default, self.padding_max)
return size
@staticmethod
def _is_frame_id(data):
if isinstance(data, str):
try:
data = data.encode("ASCII")
except UnicodeEncodeError:
return false
# Allow a single space at end of four-character ids
# Some programs (e.g. iTunes 8.2) generate such frames when converting
# from 2.2 to 2.3/2.4 tags.
pattern = re.compile(b"^[A-Z][A-Z0-9]{2}[A-Z0-9 ]?$")
return pattern.match(data)
def _prepare_frames_hook(self):
pass
def _prepare_frames(self):
# Generate dictionary of frames
d = self._frames
# Merge duplicate frames
for frameid in self._frames.keys():
fs = self._frames[frameid]
if len(fs) > 1:
d[frameid] = fs[0]._merge(fs)
self._prepare_frames_hook()
# Convert frames
newframes = []
for frameid in self._frames.keys():
for frame in self._frames[frameid]:
try:
newframes.append(frame._to_version(self.version))
except IncompatibleFrameError:
warn("{0}: Ignoring incompatible frame".format(frameid),
FrameWarning)
except ValueError as e:
warn("{0}: Ignoring invalid frame ({1})".format(frameid, e),
FrameWarning)
# Sort frames
newframes.sort(key=self.frame_order.key)
return newframes
|
staggerpkg/stagger | stagger/id3v1.py | Tag1.read | python | def read(cls, filename, offset=None, encoding="iso-8859-1"):
with fileutil.opened(filename, "rb") as file:
if offset is None:
file.seek(-128, 2)
else:
file.seek(offset)
data = file.read(128)
return cls.decode(data, encoding=encoding) | Read an ID3v1 tag from a file. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/id3v1.py#L120-L128 | [
"def decode(cls, data, encoding=\"iso-8859-1\"):\n def decode_field(data):\n try:\n data = data[:data.index(b\"\\x00\")]\n except ValueError:\n pass\n return data.decode(encoding).strip(string.whitespace)\n if data[:3] != b\"TAG\" or len(data) < 128:\n raise N... | class Tag1():
@property
def genre(self):
if self._genre < len(genres):
return "{0} ({1})".format(self._genre, genres[self._genre])
else:
return "{0} (unknown)".format(self._genre)
@genre.setter
def genre(self, value):
if value is None:
self._genre = 0
return
if type(value) == int:
if value in range(256):
self._genre = value
else:
raise ValueError("Genre must be between 0 and 255")
return
if type(value) == str:
if value.lower() == "unknown":
self._genre = 255
return
try:
self._genre = genres.index(value)
return
except ValueError:
raise ValueError("Unknown genre")
raise TypeError("Invalid genre")
def __str__(self):
return "\n".join(["title={0}".format(repr(self.title)),
"artist={0}".format(repr(self.artist)),
"album={0}".format(repr(self.album)),
"year={0}".format(repr(self.year)),
"comment={0}".format(repr(self.comment)),
"genre={0}".format(self.genre)])
def __repr__(self):
return "Tag1({0})".format(", ".join(["title={0}".format(repr(self.title)),
"artist={0}".format(repr(self.artist)),
"album={0}".format(repr(self.album)),
"year={0}".format(repr(self.year)),
"comment={0}".format(repr(self.comment)),
"genre={0}".format(self._genre)]))
def __eq__(self, other):
return (isinstance(other, Tag1)
and self.title == other.title
and self.artist == other.artist
and self.album == other.album
and self.year == other.year
and self.comment == other.comment
and self._genre == other._genre)
@classmethod
def decode(cls, data, encoding="iso-8859-1"):
def decode_field(data):
try:
data = data[:data.index(b"\x00")]
except ValueError:
pass
return data.decode(encoding).strip(string.whitespace)
if data[:3] != b"TAG" or len(data) < 128:
raise NoTagError("ID3v1 tag not found")
tag = Tag1()
tag.title = decode_field(data[3:33])
tag.artist = decode_field(data[33:63])
tag.album = decode_field(data[63:93])
tag.year = decode_field(data[93:97])
if data[125] == 0:
tag.comment = decode_field(data[97:125])
tag.track = data[126]
else:
tag.comment = decode_field(data[97:127])
tag.track = 0
tag._genre = data[127]
return tag
@classmethod
@classmethod
def delete(cls, filename, offset=None):
"""Delete ID3v1 tag from a file (if present)."""
with fileutil.opened(filename, "rb+") as file:
if offset is None:
file.seek(-128, 2)
else:
file.seek(offset)
offset = file.tell()
data = file.read(128)
if data[:3] == b"TAG":
fileutil.replace_chunk(file, offset, 128, b"", in_place=True)
def encode(self, encoding="iso-8859-1", errors="strict"):
def encode_field(field, width):
data = field.encode(encoding, errors)
if len(data) < width:
data = data + b"\x00" * (width - len(data))
return data[:width]
data = bytearray(b"TAG")
data.extend(encode_field(self.title, 30))
data.extend(encode_field(self.artist, 30))
data.extend(encode_field(self.album, 30))
data.extend(encode_field(self.year, 4))
if self.track:
data.extend(encode_field(self.comment, 28))
data.append(0)
data.append(self.track)
else:
data.extend(encode_field(self.comment, 30))
data.append(self._genre)
assert len(data) == 128
return data
def write(self, filename, encoding="iso-8859-1", errors="strict"):
with fileutil.opened(filename, "rb+") as file:
file.seek(-128, 2)
data = file.read(128)
if data[:3] == b"TAG":
file.seek(-128, 2)
else:
file.seek(0, 2)
file.write(self.encode(encoding, errors))
|
staggerpkg/stagger | stagger/id3v1.py | Tag1.delete | python | def delete(cls, filename, offset=None):
with fileutil.opened(filename, "rb+") as file:
if offset is None:
file.seek(-128, 2)
else:
file.seek(offset)
offset = file.tell()
data = file.read(128)
if data[:3] == b"TAG":
fileutil.replace_chunk(file, offset, 128, b"", in_place=True) | Delete ID3v1 tag from a file (if present). | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/id3v1.py#L131-L141 | null | class Tag1():
@property
def genre(self):
if self._genre < len(genres):
return "{0} ({1})".format(self._genre, genres[self._genre])
else:
return "{0} (unknown)".format(self._genre)
@genre.setter
def genre(self, value):
if value is None:
self._genre = 0
return
if type(value) == int:
if value in range(256):
self._genre = value
else:
raise ValueError("Genre must be between 0 and 255")
return
if type(value) == str:
if value.lower() == "unknown":
self._genre = 255
return
try:
self._genre = genres.index(value)
return
except ValueError:
raise ValueError("Unknown genre")
raise TypeError("Invalid genre")
def __str__(self):
return "\n".join(["title={0}".format(repr(self.title)),
"artist={0}".format(repr(self.artist)),
"album={0}".format(repr(self.album)),
"year={0}".format(repr(self.year)),
"comment={0}".format(repr(self.comment)),
"genre={0}".format(self.genre)])
def __repr__(self):
return "Tag1({0})".format(", ".join(["title={0}".format(repr(self.title)),
"artist={0}".format(repr(self.artist)),
"album={0}".format(repr(self.album)),
"year={0}".format(repr(self.year)),
"comment={0}".format(repr(self.comment)),
"genre={0}".format(self._genre)]))
def __eq__(self, other):
return (isinstance(other, Tag1)
and self.title == other.title
and self.artist == other.artist
and self.album == other.album
and self.year == other.year
and self.comment == other.comment
and self._genre == other._genre)
@classmethod
def decode(cls, data, encoding="iso-8859-1"):
def decode_field(data):
try:
data = data[:data.index(b"\x00")]
except ValueError:
pass
return data.decode(encoding).strip(string.whitespace)
if data[:3] != b"TAG" or len(data) < 128:
raise NoTagError("ID3v1 tag not found")
tag = Tag1()
tag.title = decode_field(data[3:33])
tag.artist = decode_field(data[33:63])
tag.album = decode_field(data[63:93])
tag.year = decode_field(data[93:97])
if data[125] == 0:
tag.comment = decode_field(data[97:125])
tag.track = data[126]
else:
tag.comment = decode_field(data[97:127])
tag.track = 0
tag._genre = data[127]
return tag
@classmethod
def read(cls, filename, offset=None, encoding="iso-8859-1"):
"""Read an ID3v1 tag from a file."""
with fileutil.opened(filename, "rb") as file:
if offset is None:
file.seek(-128, 2)
else:
file.seek(offset)
data = file.read(128)
return cls.decode(data, encoding=encoding)
@classmethod
def encode(self, encoding="iso-8859-1", errors="strict"):
def encode_field(field, width):
data = field.encode(encoding, errors)
if len(data) < width:
data = data + b"\x00" * (width - len(data))
return data[:width]
data = bytearray(b"TAG")
data.extend(encode_field(self.title, 30))
data.extend(encode_field(self.artist, 30))
data.extend(encode_field(self.album, 30))
data.extend(encode_field(self.year, 4))
if self.track:
data.extend(encode_field(self.comment, 28))
data.append(0)
data.append(self.track)
else:
data.extend(encode_field(self.comment, 30))
data.append(self._genre)
assert len(data) == 128
return data
def write(self, filename, encoding="iso-8859-1", errors="strict"):
with fileutil.opened(filename, "rb+") as file:
file.seek(-128, 2)
data = file.read(128)
if data[:3] == b"TAG":
file.seek(-128, 2)
else:
file.seek(0, 2)
file.write(self.encode(encoding, errors))
|
staggerpkg/stagger | stagger/fileutil.py | xread | python | def xread(file, length):
"Read exactly length bytes from file; raise EOFError if file ends sooner."
data = file.read(length)
if len(data) != length:
raise EOFError
return data | Read exactly length bytes from file; raise EOFError if file ends sooner. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L43-L48 | null | #
# fileutil.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File manipulation utilities."""
import io
import os.path
import shutil
import tempfile
import signal
from contextlib import contextmanager
@contextmanager
def opened(filename, mode):
"Open filename, or do nothing if filename is already an open file object"
if isinstance(filename, str):
file = open(filename, mode)
try:
yield file
finally:
if not file.closed:
file.close()
else:
yield filename
@contextmanager
def suppress_interrupt():
"""Suppress KeyboardInterrupt exceptions while the context is active.
The suppressed interrupt (if any) is raised when the context is exited.
"""
interrupted = False
def sigint_handler(signum, frame):
nonlocal interrupted
interrupted = True
s = signal.signal(signal.SIGINT, sigint_handler)
try:
yield None
finally:
signal.signal(signal.SIGINT, s)
if interrupted:
raise KeyboardInterrupt()
def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5):
"""Replace length bytes of data with chunk, starting at offset.
Any KeyboardInterrupts arriving while replace_chunk is running
are deferred until the operation is complete.
If in_place is true, the operation works directly on the original
file; this is fast and works on files that are already open, but
an error or interrupt may lead to corrupt file contents.
If in_place is false, the function prepares a copy first, then
renames it back over the original file. This method is slower,
but it prevents corruption on systems with atomic renames (UNIX),
and reduces the window of vulnerability elsewhere (Windows).
If there is no need to move data that is not being replaced, then we use
the direct method irrespective of in_place. (In this case an interrupt
may only corrupt the chunk being replaced.)
"""
with suppress_interrupt():
_replace_chunk(filename, offset, length, chunk, in_place, max_mem)
def _replace_chunk(filename, offset, length, chunk, in_place, max_mem):
assert isinstance(filename, str) or in_place
with opened(filename, "rb+") as file:
# If the sizes match, we can simply overwrite the original data.
if length == len(chunk):
file.seek(offset)
file.write(chunk)
return
oldsize = file.seek(0, 2)
newsize = oldsize - length + len(chunk)
# If the orig chunk is exactly at the end of the file, we can
# simply truncate the file and then append the new chunk.
if offset + length == oldsize:
file.seek(offset)
file.truncate()
file.write(chunk)
return
if in_place:
_replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize)
else: # not in_place
temp = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
prefix="stagger-",
suffix=".tmp",
delete=False)
try:
file.seek(0)
_copy_chunk(file, temp, offset)
temp.write(chunk)
file.seek(offset + length)
_copy_chunk(file, temp, oldsize - offset - length)
finally:
temp.close()
file.close()
shutil.copymode(filename, temp.name)
shutil.move(temp.name, filename)
return
def _copy_chunk(src, dst, length):
"Copy length bytes from file src to file dst."
BUFSIZE = 128 * 1024
while length > 0:
l = min(BUFSIZE, length)
buf = src.read(l)
assert len(buf) == l
dst.write(buf)
length -= l
def _replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize):
if newsize > oldsize:
file.seek(0, 2)
file.write(b"\x00" * (len(chunk) - length))
file.seek(0)
try:
import mmap
m = mmap.mmap(file.fileno(), max(oldsize, newsize))
try:
m.move(offset + len(chunk),
offset + length,
oldsize - offset - length)
m[offset:offset + len(chunk)] = chunk
finally:
m.close()
except (ImportError, EnvironmentError, ValueError):
# mmap didn't work. Let's load the tail into a tempfile
# and construct the result from there.
file.seek(offset + length)
temp = tempfile.SpooledTemporaryFile(
max_size=max_mem * (1<<20),
prefix="stagger-",
suffix=".tmp")
try:
_copy_chunk(file, temp, oldsize - offset - length)
file.seek(offset)
file.truncate()
file.write(chunk)
temp.seek(0)
_copy_chunk(temp, file, oldsize - offset - length)
finally:
temp.close()
return
else:
# mmap did work, we just need to truncate any leftover parts
# at the end
file.truncate(newsize)
return
|
staggerpkg/stagger | stagger/fileutil.py | opened | python | def opened(filename, mode):
"Open filename, or do nothing if filename is already an open file object"
if isinstance(filename, str):
file = open(filename, mode)
try:
yield file
finally:
if not file.closed:
file.close()
else:
yield filename | Open filename, or do nothing if filename is already an open file object | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L51-L61 | null | #
# fileutil.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File manipulation utilities."""
import io
import os.path
import shutil
import tempfile
import signal
from contextlib import contextmanager
def xread(file, length):
"Read exactly length bytes from file; raise EOFError if file ends sooner."
data = file.read(length)
if len(data) != length:
raise EOFError
return data
@contextmanager
@contextmanager
def suppress_interrupt():
"""Suppress KeyboardInterrupt exceptions while the context is active.
The suppressed interrupt (if any) is raised when the context is exited.
"""
interrupted = False
def sigint_handler(signum, frame):
nonlocal interrupted
interrupted = True
s = signal.signal(signal.SIGINT, sigint_handler)
try:
yield None
finally:
signal.signal(signal.SIGINT, s)
if interrupted:
raise KeyboardInterrupt()
def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5):
"""Replace length bytes of data with chunk, starting at offset.
Any KeyboardInterrupts arriving while replace_chunk is running
are deferred until the operation is complete.
If in_place is true, the operation works directly on the original
file; this is fast and works on files that are already open, but
an error or interrupt may lead to corrupt file contents.
If in_place is false, the function prepares a copy first, then
renames it back over the original file. This method is slower,
but it prevents corruption on systems with atomic renames (UNIX),
and reduces the window of vulnerability elsewhere (Windows).
If there is no need to move data that is not being replaced, then we use
the direct method irrespective of in_place. (In this case an interrupt
may only corrupt the chunk being replaced.)
"""
with suppress_interrupt():
_replace_chunk(filename, offset, length, chunk, in_place, max_mem)
def _replace_chunk(filename, offset, length, chunk, in_place, max_mem):
assert isinstance(filename, str) or in_place
with opened(filename, "rb+") as file:
# If the sizes match, we can simply overwrite the original data.
if length == len(chunk):
file.seek(offset)
file.write(chunk)
return
oldsize = file.seek(0, 2)
newsize = oldsize - length + len(chunk)
# If the orig chunk is exactly at the end of the file, we can
# simply truncate the file and then append the new chunk.
if offset + length == oldsize:
file.seek(offset)
file.truncate()
file.write(chunk)
return
if in_place:
_replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize)
else: # not in_place
temp = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
prefix="stagger-",
suffix=".tmp",
delete=False)
try:
file.seek(0)
_copy_chunk(file, temp, offset)
temp.write(chunk)
file.seek(offset + length)
_copy_chunk(file, temp, oldsize - offset - length)
finally:
temp.close()
file.close()
shutil.copymode(filename, temp.name)
shutil.move(temp.name, filename)
return
def _copy_chunk(src, dst, length):
"Copy length bytes from file src to file dst."
BUFSIZE = 128 * 1024
while length > 0:
l = min(BUFSIZE, length)
buf = src.read(l)
assert len(buf) == l
dst.write(buf)
length -= l
def _replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize):
if newsize > oldsize:
file.seek(0, 2)
file.write(b"\x00" * (len(chunk) - length))
file.seek(0)
try:
import mmap
m = mmap.mmap(file.fileno(), max(oldsize, newsize))
try:
m.move(offset + len(chunk),
offset + length,
oldsize - offset - length)
m[offset:offset + len(chunk)] = chunk
finally:
m.close()
except (ImportError, EnvironmentError, ValueError):
# mmap didn't work. Let's load the tail into a tempfile
# and construct the result from there.
file.seek(offset + length)
temp = tempfile.SpooledTemporaryFile(
max_size=max_mem * (1<<20),
prefix="stagger-",
suffix=".tmp")
try:
_copy_chunk(file, temp, oldsize - offset - length)
file.seek(offset)
file.truncate()
file.write(chunk)
temp.seek(0)
_copy_chunk(temp, file, oldsize - offset - length)
finally:
temp.close()
return
else:
# mmap did work, we just need to truncate any leftover parts
# at the end
file.truncate(newsize)
return
|
staggerpkg/stagger | stagger/fileutil.py | suppress_interrupt | python | def suppress_interrupt():
interrupted = False
def sigint_handler(signum, frame):
nonlocal interrupted
interrupted = True
s = signal.signal(signal.SIGINT, sigint_handler)
try:
yield None
finally:
signal.signal(signal.SIGINT, s)
if interrupted:
raise KeyboardInterrupt() | Suppress KeyboardInterrupt exceptions while the context is active.
The suppressed interrupt (if any) is raised when the context is exited. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L64-L81 | null | #
# fileutil.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File manipulation utilities."""
import io
import os.path
import shutil
import tempfile
import signal
from contextlib import contextmanager
def xread(file, length):
"Read exactly length bytes from file; raise EOFError if file ends sooner."
data = file.read(length)
if len(data) != length:
raise EOFError
return data
@contextmanager
def opened(filename, mode):
"Open filename, or do nothing if filename is already an open file object"
if isinstance(filename, str):
file = open(filename, mode)
try:
yield file
finally:
if not file.closed:
file.close()
else:
yield filename
@contextmanager
def suppress_interrupt():
"""Suppress KeyboardInterrupt exceptions while the context is active.
The suppressed interrupt (if any) is raised when the context is exited.
"""
interrupted = False
def sigint_handler(signum, frame):
nonlocal interrupted
interrupted = True
s = signal.signal(signal.SIGINT, sigint_handler)
try:
yield None
finally:
signal.signal(signal.SIGINT, s)
if interrupted:
raise KeyboardInterrupt()
def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5):
"""Replace length bytes of data with chunk, starting at offset.
Any KeyboardInterrupts arriving while replace_chunk is running
are deferred until the operation is complete.
If in_place is true, the operation works directly on the original
file; this is fast and works on files that are already open, but
an error or interrupt may lead to corrupt file contents.
If in_place is false, the function prepares a copy first, then
renames it back over the original file. This method is slower,
but it prevents corruption on systems with atomic renames (UNIX),
and reduces the window of vulnerability elsewhere (Windows).
If there is no need to move data that is not being replaced, then we use
the direct method irrespective of in_place. (In this case an interrupt
may only corrupt the chunk being replaced.)
"""
with suppress_interrupt():
_replace_chunk(filename, offset, length, chunk, in_place, max_mem)
def _replace_chunk(filename, offset, length, chunk, in_place, max_mem):
assert isinstance(filename, str) or in_place
with opened(filename, "rb+") as file:
# If the sizes match, we can simply overwrite the original data.
if length == len(chunk):
file.seek(offset)
file.write(chunk)
return
oldsize = file.seek(0, 2)
newsize = oldsize - length + len(chunk)
# If the orig chunk is exactly at the end of the file, we can
# simply truncate the file and then append the new chunk.
if offset + length == oldsize:
file.seek(offset)
file.truncate()
file.write(chunk)
return
if in_place:
_replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize)
else: # not in_place
temp = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
prefix="stagger-",
suffix=".tmp",
delete=False)
try:
file.seek(0)
_copy_chunk(file, temp, offset)
temp.write(chunk)
file.seek(offset + length)
_copy_chunk(file, temp, oldsize - offset - length)
finally:
temp.close()
file.close()
shutil.copymode(filename, temp.name)
shutil.move(temp.name, filename)
return
def _copy_chunk(src, dst, length):
"Copy length bytes from file src to file dst."
BUFSIZE = 128 * 1024
while length > 0:
l = min(BUFSIZE, length)
buf = src.read(l)
assert len(buf) == l
dst.write(buf)
length -= l
def _replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize):
if newsize > oldsize:
file.seek(0, 2)
file.write(b"\x00" * (len(chunk) - length))
file.seek(0)
try:
import mmap
m = mmap.mmap(file.fileno(), max(oldsize, newsize))
try:
m.move(offset + len(chunk),
offset + length,
oldsize - offset - length)
m[offset:offset + len(chunk)] = chunk
finally:
m.close()
except (ImportError, EnvironmentError, ValueError):
# mmap didn't work. Let's load the tail into a tempfile
# and construct the result from there.
file.seek(offset + length)
temp = tempfile.SpooledTemporaryFile(
max_size=max_mem * (1<<20),
prefix="stagger-",
suffix=".tmp")
try:
_copy_chunk(file, temp, oldsize - offset - length)
file.seek(offset)
file.truncate()
file.write(chunk)
temp.seek(0)
_copy_chunk(temp, file, oldsize - offset - length)
finally:
temp.close()
return
else:
# mmap did work, we just need to truncate any leftover parts
# at the end
file.truncate(newsize)
return
|
staggerpkg/stagger | stagger/fileutil.py | replace_chunk | python | def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5):
with suppress_interrupt():
_replace_chunk(filename, offset, length, chunk, in_place, max_mem) | Replace length bytes of data with chunk, starting at offset.
Any KeyboardInterrupts arriving while replace_chunk is running
are deferred until the operation is complete.
If in_place is true, the operation works directly on the original
file; this is fast and works on files that are already open, but
an error or interrupt may lead to corrupt file contents.
If in_place is false, the function prepares a copy first, then
renames it back over the original file. This method is slower,
but it prevents corruption on systems with atomic renames (UNIX),
and reduces the window of vulnerability elsewhere (Windows).
If there is no need to move data that is not being replaced, then we use
the direct method irrespective of in_place. (In this case an interrupt
may only corrupt the chunk being replaced.) | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L83-L102 | [
"def _replace_chunk(filename, offset, length, chunk, in_place, max_mem):\n assert isinstance(filename, str) or in_place\n with opened(filename, \"rb+\") as file:\n # If the sizes match, we can simply overwrite the original data.\n if length == len(chunk):\n file.seek(offset)\n ... | #
# fileutil.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File manipulation utilities."""
import io
import os.path
import shutil
import tempfile
import signal
from contextlib import contextmanager
def xread(file, length):
"Read exactly length bytes from file; raise EOFError if file ends sooner."
data = file.read(length)
if len(data) != length:
raise EOFError
return data
@contextmanager
def opened(filename, mode):
"Open filename, or do nothing if filename is already an open file object"
if isinstance(filename, str):
file = open(filename, mode)
try:
yield file
finally:
if not file.closed:
file.close()
else:
yield filename
@contextmanager
def suppress_interrupt():
"""Suppress KeyboardInterrupt exceptions while the context is active.
The suppressed interrupt (if any) is raised when the context is exited.
"""
interrupted = False
def sigint_handler(signum, frame):
nonlocal interrupted
interrupted = True
s = signal.signal(signal.SIGINT, sigint_handler)
try:
yield None
finally:
signal.signal(signal.SIGINT, s)
if interrupted:
raise KeyboardInterrupt()
def _replace_chunk(filename, offset, length, chunk, in_place, max_mem):
assert isinstance(filename, str) or in_place
with opened(filename, "rb+") as file:
# If the sizes match, we can simply overwrite the original data.
if length == len(chunk):
file.seek(offset)
file.write(chunk)
return
oldsize = file.seek(0, 2)
newsize = oldsize - length + len(chunk)
# If the orig chunk is exactly at the end of the file, we can
# simply truncate the file and then append the new chunk.
if offset + length == oldsize:
file.seek(offset)
file.truncate()
file.write(chunk)
return
if in_place:
_replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize)
else: # not in_place
temp = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
prefix="stagger-",
suffix=".tmp",
delete=False)
try:
file.seek(0)
_copy_chunk(file, temp, offset)
temp.write(chunk)
file.seek(offset + length)
_copy_chunk(file, temp, oldsize - offset - length)
finally:
temp.close()
file.close()
shutil.copymode(filename, temp.name)
shutil.move(temp.name, filename)
return
def _copy_chunk(src, dst, length):
"Copy length bytes from file src to file dst."
BUFSIZE = 128 * 1024
while length > 0:
l = min(BUFSIZE, length)
buf = src.read(l)
assert len(buf) == l
dst.write(buf)
length -= l
def _replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize):
if newsize > oldsize:
file.seek(0, 2)
file.write(b"\x00" * (len(chunk) - length))
file.seek(0)
try:
import mmap
m = mmap.mmap(file.fileno(), max(oldsize, newsize))
try:
m.move(offset + len(chunk),
offset + length,
oldsize - offset - length)
m[offset:offset + len(chunk)] = chunk
finally:
m.close()
except (ImportError, EnvironmentError, ValueError):
# mmap didn't work. Let's load the tail into a tempfile
# and construct the result from there.
file.seek(offset + length)
temp = tempfile.SpooledTemporaryFile(
max_size=max_mem * (1<<20),
prefix="stagger-",
suffix=".tmp")
try:
_copy_chunk(file, temp, oldsize - offset - length)
file.seek(offset)
file.truncate()
file.write(chunk)
temp.seek(0)
_copy_chunk(temp, file, oldsize - offset - length)
finally:
temp.close()
return
else:
# mmap did work, we just need to truncate any leftover parts
# at the end
file.truncate(newsize)
return
|
staggerpkg/stagger | stagger/fileutil.py | _copy_chunk | python | def _copy_chunk(src, dst, length):
"Copy length bytes from file src to file dst."
BUFSIZE = 128 * 1024
while length > 0:
l = min(BUFSIZE, length)
buf = src.read(l)
assert len(buf) == l
dst.write(buf)
length -= l | Copy length bytes from file src to file dst. | train | https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L145-L153 | null | #
# fileutil.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File manipulation utilities."""
import io
import os.path
import shutil
import tempfile
import signal
from contextlib import contextmanager
def xread(file, length):
    """Read and return exactly *length* bytes from *file*.

    Raises:
        EOFError: if the file produced fewer bytes than requested.
    """
    buf = file.read(length)
    if len(buf) != length:
        raise EOFError
    return buf
@contextmanager
def opened(filename, mode):
    """Yield an open file for *filename*, or *filename* itself when it is
    already a file object.

    Files opened here are closed on exit; files supplied by the caller are
    left open (the caller keeps ownership).
    """
    if not isinstance(filename, str):
        # Already a file object -- pass it straight through, don't close it.
        yield filename
    else:
        handle = open(filename, mode)
        try:
            yield handle
        finally:
            if not handle.closed:
                handle.close()
@contextmanager
def suppress_interrupt():
    """Suppress KeyboardInterrupt exceptions while the context is active.

    The suppressed interrupt (if any) is raised when the context is exited.
    """
    # Flag flipped by the temporary SIGINT handler below.
    interrupted = False

    def sigint_handler(signum, frame):
        # Record the interrupt instead of letting Python raise it immediately.
        nonlocal interrupted
        interrupted = True

    # Install the deferring handler; keep the previous one for restoration.
    s = signal.signal(signal.SIGINT, sigint_handler)
    try:
        yield None
    finally:
        # Always restore the previous SIGINT disposition.
        signal.signal(signal.SIGINT, s)
    # Deliver the deferred interrupt now that the critical section is done.
    # NOTE(review): indentation was lost in this dump; the re-raise is
    # reconstructed at function level (after the try/finally) rather than
    # inside the finally block -- confirm against upstream stagger.
    if interrupted:
        raise KeyboardInterrupt()
def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5):
    """Replace length bytes of data with chunk, starting at offset.

    Any KeyboardInterrupts arriving while replace_chunk is running
    are deferred until the operation is complete.

    If in_place is true, the operation works directly on the original
    file; this is fast and works on files that are already open, but
    an error or interrupt may lead to corrupt file contents.

    If in_place is false, the function prepares a copy first, then
    renames it back over the original file. This method is slower,
    but it prevents corruption on systems with atomic renames (UNIX),
    and reduces the window of vulnerability elsewhere (Windows).

    If there is no need to move data that is not being replaced, then we use
    the direct method irrespective of in_place. (In this case an interrupt
    may only corrupt the chunk being replaced.)

    max_mem is the size in MiB of the in-memory spool used by the
    non-mmap fallback path (see _replace_chunk_in_place).
    """
    # Defer Ctrl-C until the file is back in a consistent state.
    with suppress_interrupt():
        _replace_chunk(filename, offset, length, chunk, in_place, max_mem)
def _replace_chunk(filename, offset, length, chunk, in_place, max_mem):
    """Worker for replace_chunk: perform the splice without interrupt guarding.

    filename may be an open file object only when in_place is true
    (a copy-and-rename needs a real filename to rename over).
    """
    assert isinstance(filename, str) or in_place
    with opened(filename, "rb+") as file:
        # If the sizes match, we can simply overwrite the original data.
        if length == len(chunk):
            file.seek(offset)
            file.write(chunk)
            return
        # seek-to-end returns the current file size.
        oldsize = file.seek(0, 2)
        newsize = oldsize - length + len(chunk)
        # If the orig chunk is exactly at the end of the file, we can
        # simply truncate the file and then append the new chunk.
        if offset + length == oldsize:
            file.seek(offset)
            file.truncate()
            file.write(chunk)
            return
        if in_place:
            _replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize)
        else:  # not in_place
            # Build the result in a sibling temp file (same directory so the
            # final rename stays on one filesystem), then rename it over the
            # original.
            temp = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
                                               prefix="stagger-",
                                               suffix=".tmp",
                                               delete=False)
            try:
                file.seek(0)
                _copy_chunk(file, temp, offset)                     # head
                temp.write(chunk)                                   # replacement
                file.seek(offset + length)
                _copy_chunk(file, temp, oldsize - offset - length)  # tail
            finally:
                # Both files must be closed before the rename (Windows).
                # NOTE(review): indentation was lost in this dump; placing both
                # closes inside `finally` is reconstructed -- confirm upstream.
                temp.close()
                file.close()
            shutil.copymode(filename, temp.name)
            shutil.move(temp.name, filename)
            return
def _replace_chunk_in_place(file, offset, length, chunk, oldsize, newsize, max_mem=5):
    """Splice chunk over file[offset:offset+length] directly in file.

    Tries mmap first (shift the tail with m.move, then patch the hole); if
    mmap is unavailable or fails, falls back to buffering the tail in a
    spooled temporary file and rewriting it after the chunk.

    max_mem is the spool size in MiB for the fallback path.  Bug fix: it was
    previously referenced here without being a parameter (and the caller does
    not pass it), so the fallback path raised NameError; it is now a
    parameter defaulting to the same value replace_chunk defaults to.
    """
    if newsize > oldsize:
        # Grow the file first so the mmap below can cover the final size.
        file.seek(0, 2)
        file.write(b"\x00" * (len(chunk) - length))
    # Rewind; seeking also flushes Python's buffered writes before mmapping.
    file.seek(0)
    try:
        import mmap
        m = mmap.mmap(file.fileno(), max(oldsize, newsize))
        try:
            # Shift the tail to its new position, then overwrite the hole.
            m.move(offset + len(chunk),
                   offset + length,
                   oldsize - offset - length)
            m[offset:offset + len(chunk)] = chunk
        finally:
            m.close()
    except (ImportError, EnvironmentError, ValueError):
        # mmap didn't work.  Let's load the tail into a tempfile
        # and construct the result from there.
        file.seek(offset + length)
        temp = tempfile.SpooledTemporaryFile(
            max_size=max_mem * (1 << 20),
            prefix="stagger-",
            suffix=".tmp")
        try:
            _copy_chunk(file, temp, oldsize - offset - length)
            file.seek(offset)
            file.truncate()
            file.write(chunk)
            temp.seek(0)
            _copy_chunk(temp, file, oldsize - offset - length)
        finally:
            temp.close()
        return
    else:
        # mmap did work; we just need to truncate any leftover parts
        # at the end.
        file.truncate(newsize)
        return
|
CameronLonsdale/lantern | lantern/analysis/frequency.py | frequency_to_probability | python | def frequency_to_probability(frequency_map, decorator=lambda f: f):
total = sum(frequency_map.values())
return {k: decorator(v / total) for k, v in frequency_map.items()} | Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.
Example:
>>> frequency_to_probability({'a': 2, 'b': 2})
{'a': 0.5, 'b': 0.5}
Args:
frequency_map (dict): The dictionary to transform
decorator (function): A function to manipulate the probability
Returns:
Dictionary of ngrams to probability | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/analysis/frequency.py#L34-L49 | null | """General purpose frequency analysis tools."""
import importlib
import statistics
from collections import Counter
from lantern.structures import DynamicDict
from lantern.util import iterate_ngrams
def frequency_analyze(text, n=1):
    """Count the occurrences of every size-*n* ngram in *text*.

    Examples:
        >>> frequency_analyze("abb")
        {'a': 1, 'b': 2}

        >>> frequency_analyze("abb", 2)
        {'ab': 1, 'bb': 1}

    Args:
        text (str): The text to analyze
        n (int): The ngram size to use

    Returns:
        Dictionary of ngrams to frequency

    Raises:
        ValueError: If n is not a positive integer
    """
    ngrams = iterate_ngrams(text, n)
    return Counter(ngrams)
def index_of_coincidence(*texts):
    """Calculate the index of coincidence for one or more ``texts``.

    The results are averaged over multiple texts to return the delta index
    of coincidence.

    Examples:
        >>> index_of_coincidence("aabbc")
        0.2

        >>> index_of_coincidence("aabbc", "abbcc")
        0.2

    Args:
        *texts (variable length argument list): The texts to analyze

    Returns:
        Decimal value of the index of coincidence

    Raises:
        ValueError: If texts is empty
        ValueError: If any text is less than 2 characters long
    """
    if not texts:
        raise ValueError("texts must not be empty")
    scores = [
        _calculate_index_of_coincidence(frequency_analyze(text), len(text))
        for text in texts
    ]
    return statistics.mean(scores)
def _calculate_index_of_coincidence(frequency_map, length):
    """Probability that two symbols drawn at random from the text are equal.

    Equivalently: a measure of how far frequency_map is from the uniform
    distribution.
    """
    if length <= 1:
        # A length of 1 is legitimate: splitting a 3-character ciphertext
        # across a 2-column key leaves one column with a single letter.
        # There is no way to choose 2 letters from a 1-letter text, so the
        # answer is 0.  This does drag down the mean computed by
        # index_of_coincidence, but erroring out here is not an option.
        return 0
    # Ordered pairs of identical letters (no replacement, order irrelevant
    # since both numerator and denominator count ordered pairs).
    matching_pairs = sum(freq * (freq - 1) for freq in frequency_map.values())
    # All ordered pairs of letters that can be drawn from the text.
    possible_pairs = length * (length - 1)
    return matching_pairs / possible_pairs
def chi_squared(source_frequency, target_frequency):
    """Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.

    Example:
        >>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
        0.1

    Args:
        source_frequency (dict): Frequency map of the text you are analyzing
        target_frequency (dict): Frequency map of the target language to compare with

    Returns:
        Decimal value of the chi-squared statistic
    """
    target_prob = frequency_to_probability(target_frequency)
    # Symbols missing from the target map are ignored entirely: they count
    # towards neither the effective source length nor the statistic.
    # TODO: raise Error if source_len is 0?
    source_len = sum(
        count for symbol, count in source_frequency.items()
        if symbol in target_frequency
    )
    # A symbol absent from the source is treated as frequency 0.
    return sum(
        _calculate_chi_squared(source_frequency.get(symbol, 0), prob, source_len)
        for symbol, prob in target_prob.items()
    )
def _calculate_chi_squared(source_freq, target_prob, source_len):
    """One term of the chi-squared sum for a single symbol.

    Compares the observed count against the count expected from the target
    probability; 0 means the observation matches the expectation exactly.
    """
    expected = source_len * target_prob
    deviation = source_freq - expected
    return deviation**2 / expected
def _load_ngram(name):
    """Import and return the ngram table called *name*.

    The tables are large, so each one lives in its own module and is imported
    on demand rather than statically at package import time.
    """
    module_name = 'lantern.analysis.english_ngrams.{}'.format(name)
    module = importlib.import_module(module_name)
    return getattr(module, name)
# Lazily-loaded English ngram frequency tables: each entry is only imported
# (via _load_ngram) the first time it is accessed through the DynamicDict.
english = DynamicDict({
    'unigrams': lambda: _load_ngram('unigrams'),
    'bigrams': lambda: _load_ngram('bigrams'),
    'trigrams': lambda: _load_ngram('trigrams'),
    'quadgrams': lambda: _load_ngram('quadgrams')
})
"""English ngram frequencies."""

# Reference value computed once from the English unigram counts.  Note that
# accessing english.unigrams here forces the unigram table to load at module
# import time.
ENGLISH_IC = _calculate_index_of_coincidence(english.unigrams, sum(english.unigrams.values()))
"""Index of coincidence for the English language."""
|
CameronLonsdale/lantern | lantern/analysis/frequency.py | index_of_coincidence | python | def index_of_coincidence(*texts):
if not texts:
raise ValueError("texts must not be empty")
return statistics.mean(_calculate_index_of_coincidence(frequency_analyze(text), len(text)) for text in texts) | Calculate the index of coincidence for one or more ``texts``.
The results are averaged over multiple texts to return the delta index of coincidence.
Examples:
>>> index_of_coincidence("aabbc")
0.2
>>> index_of_coincidence("aabbc", "abbcc")
0.2
Args:
*texts (variable length argument list): The texts to analyze
Returns:
Decimal value of the index of coincidence
Raises:
ValueError: If texts is empty
ValueError: If any text is less than 2 characters long | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/analysis/frequency.py#L52-L76 | null | """General purpose frequency analysis tools."""
import importlib
import statistics
from collections import Counter
from lantern.structures import DynamicDict
from lantern.util import iterate_ngrams
def frequency_analyze(text, n=1):
"""Analyze the frequency of ngrams for a piece of text.
Examples:
>>> frequency_analyze("abb")
{'a': 1, 'b': 2}
>>> frequency_analyze("abb", 2)
{'ab': 1, 'bb': 1}
Args:
text (str): The text to analyze
n (int): The ngram size to use
Returns:
Dictionary of ngrams to frequency
Raises:
ValueError: If n is not a positive integer
"""
return Counter(iterate_ngrams(text, n))
def frequency_to_probability(frequency_map, decorator=lambda f: f):
    """Convert a map of ngram frequencies into a map of probabilities.

    Each frequency is divided by the sum of all frequencies, and *decorator*
    is applied to every resulting probability (identity by default).

    Example:
        >>> frequency_to_probability({'a': 2, 'b': 2})
        {'a': 0.5, 'b': 0.5}

    Args:
        frequency_map (dict): The dictionary to transform
        decorator (function): A function to manipulate the probability

    Returns:
        Dictionary of ngrams to probability
    """
    denominator = sum(frequency_map.values())
    return {ngram: decorator(count / denominator)
            for ngram, count in frequency_map.items()}
def _calculate_index_of_coincidence(frequency_map, length):
"""A measure of how similar frequency_map is to the uniform distribution.
Or the probability that two letters picked randomly are alike.
"""
if length <= 1:
return 0
# We cannot error here as length can legitimiately be 1.
# Imagine a ciphertext of length 3 and a key of length 2.
# Spliting this text up and calculating the index of coincidence results in ['AC', 'B']
# IOC of B will be calcuated for the 2nd column of the key. We could represent the same
# encryption with a key of length 3 but then we encounter the same problem. This is also
# legitimiate encryption scheme we cannot ignore. Hence we have to deal with this fact here
# A value of 0 will impact the overall mean, however it does make some sense when you ask the question
# How many ways to choose 2 letters from the text, if theres only 1 letter then the answer is 0.
# Mathemtical combination, number of ways to choose 2 letters, no replacement, order doesnt matter
combination_of_letters = sum(freq * (freq - 1) for freq in frequency_map.values())
return combination_of_letters / (length * (length - 1))
def chi_squared(source_frequency, target_frequency):
"""Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic
"""
# Ignore any symbols from source that are not in target.
# TODO: raise Error if source_len is 0?
target_prob = frequency_to_probability(target_frequency)
source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)
result = 0
for symbol, prob in target_prob.items():
symbol_frequency = source_frequency.get(symbol, 0) # Frequecy is 0 if it doesnt appear in source
result += _calculate_chi_squared(symbol_frequency, prob, source_len)
return result
def _calculate_chi_squared(source_freq, target_prob, source_len):
"""A measure of the observed frequency of the symbol versus the expected frequency.
If the value is 0 then the texts are exactly alike for that symbol.
"""
expected = source_len * target_prob
return (source_freq - expected)**2 / expected
def _load_ngram(name):
"""Dynamically import the python module with the ngram defined as a dictionary.
Since bigger ngrams are large files its wasteful to always statically import them if they're not used.
"""
module = importlib.import_module('lantern.analysis.english_ngrams.{}'.format(name))
return getattr(module, name)
english = DynamicDict({
'unigrams': lambda: _load_ngram('unigrams'),
'bigrams': lambda: _load_ngram('bigrams'),
'trigrams': lambda: _load_ngram('trigrams'),
'quadgrams': lambda: _load_ngram('quadgrams')
})
"""English ngram frequencies."""
ENGLISH_IC = _calculate_index_of_coincidence(english.unigrams, sum(english.unigrams.values()))
"""Index of coincidence for the English language."""
|
CameronLonsdale/lantern | lantern/analysis/frequency.py | _calculate_index_of_coincidence | python | def _calculate_index_of_coincidence(frequency_map, length):
if length <= 1:
return 0
# We cannot error here as length can legitimiately be 1.
# Imagine a ciphertext of length 3 and a key of length 2.
# Spliting this text up and calculating the index of coincidence results in ['AC', 'B']
# IOC of B will be calcuated for the 2nd column of the key. We could represent the same
# encryption with a key of length 3 but then we encounter the same problem. This is also
# legitimiate encryption scheme we cannot ignore. Hence we have to deal with this fact here
# A value of 0 will impact the overall mean, however it does make some sense when you ask the question
# How many ways to choose 2 letters from the text, if theres only 1 letter then the answer is 0.
# Mathemtical combination, number of ways to choose 2 letters, no replacement, order doesnt matter
combination_of_letters = sum(freq * (freq - 1) for freq in frequency_map.values())
return combination_of_letters / (length * (length - 1)) | A measure of how similar frequency_map is to the uniform distribution.
Or the probability that two letters picked randomly are alike. | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/analysis/frequency.py#L79-L96 | null | """General purpose frequency analysis tools."""
import importlib
import statistics
from collections import Counter
from lantern.structures import DynamicDict
from lantern.util import iterate_ngrams
def frequency_analyze(text, n=1):
"""Analyze the frequency of ngrams for a piece of text.
Examples:
>>> frequency_analyze("abb")
{'a': 1, 'b': 2}
>>> frequency_analyze("abb", 2)
{'ab': 1, 'bb': 1}
Args:
text (str): The text to analyze
n (int): The ngram size to use
Returns:
Dictionary of ngrams to frequency
Raises:
ValueError: If n is not a positive integer
"""
return Counter(iterate_ngrams(text, n))
def frequency_to_probability(frequency_map, decorator=lambda f: f):
"""Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.
Example:
>>> frequency_to_probability({'a': 2, 'b': 2})
{'a': 0.5, 'b': 0.5}
Args:
frequency_map (dict): The dictionary to transform
decorator (function): A function to manipulate the probability
Returns:
Dictionary of ngrams to probability
"""
total = sum(frequency_map.values())
return {k: decorator(v / total) for k, v in frequency_map.items()}
def index_of_coincidence(*texts):
"""Calculate the index of coincidence for one or more ``texts``.
The results are averaged over multiple texts to return the delta index of coincidence.
Examples:
>>> index_of_coincidence("aabbc")
0.2
>>> index_of_coincidence("aabbc", "abbcc")
0.2
Args:
*texts (variable length argument list): The texts to analyze
Returns:
Decimal value of the index of coincidence
Raises:
ValueError: If texts is empty
ValueError: If any text is less that 2 character long
"""
if not texts:
raise ValueError("texts must not be empty")
return statistics.mean(_calculate_index_of_coincidence(frequency_analyze(text), len(text)) for text in texts)
def chi_squared(source_frequency, target_frequency):
"""Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic
"""
# Ignore any symbols from source that are not in target.
# TODO: raise Error if source_len is 0?
target_prob = frequency_to_probability(target_frequency)
source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)
result = 0
for symbol, prob in target_prob.items():
symbol_frequency = source_frequency.get(symbol, 0) # Frequecy is 0 if it doesnt appear in source
result += _calculate_chi_squared(symbol_frequency, prob, source_len)
return result
def _calculate_chi_squared(source_freq, target_prob, source_len):
"""A measure of the observed frequency of the symbol versus the expected frequency.
If the value is 0 then the texts are exactly alike for that symbol.
"""
expected = source_len * target_prob
return (source_freq - expected)**2 / expected
def _load_ngram(name):
"""Dynamically import the python module with the ngram defined as a dictionary.
Since bigger ngrams are large files its wasteful to always statically import them if they're not used.
"""
module = importlib.import_module('lantern.analysis.english_ngrams.{}'.format(name))
return getattr(module, name)
english = DynamicDict({
'unigrams': lambda: _load_ngram('unigrams'),
'bigrams': lambda: _load_ngram('bigrams'),
'trigrams': lambda: _load_ngram('trigrams'),
'quadgrams': lambda: _load_ngram('quadgrams')
})
"""English ngram frequencies."""
ENGLISH_IC = _calculate_index_of_coincidence(english.unigrams, sum(english.unigrams.values()))
"""Index of coincidence for the English language."""
|
CameronLonsdale/lantern | lantern/analysis/frequency.py | chi_squared | python | def chi_squared(source_frequency, target_frequency):
# Ignore any symbols from source that are not in target.
# TODO: raise Error if source_len is 0?
target_prob = frequency_to_probability(target_frequency)
source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)
result = 0
for symbol, prob in target_prob.items():
symbol_frequency = source_frequency.get(symbol, 0) # Frequecy is 0 if it doesnt appear in source
result += _calculate_chi_squared(symbol_frequency, prob, source_len)
return result | Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/analysis/frequency.py#L99-L123 | [
"def frequency_to_probability(frequency_map, decorator=lambda f: f):\n \"\"\"Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.\n\n Example:\n >>> frequency_to_probability({'a': 2, 'b': 2})\n {'a': 0.5, 'b': 0.5}\n\n Args:\n frequency... | """General purpose frequency analysis tools."""
import importlib
import statistics
from collections import Counter
from lantern.structures import DynamicDict
from lantern.util import iterate_ngrams
def frequency_analyze(text, n=1):
"""Analyze the frequency of ngrams for a piece of text.
Examples:
>>> frequency_analyze("abb")
{'a': 1, 'b': 2}
>>> frequency_analyze("abb", 2)
{'ab': 1, 'bb': 1}
Args:
text (str): The text to analyze
n (int): The ngram size to use
Returns:
Dictionary of ngrams to frequency
Raises:
ValueError: If n is not a positive integer
"""
return Counter(iterate_ngrams(text, n))
def frequency_to_probability(frequency_map, decorator=lambda f: f):
"""Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.
Example:
>>> frequency_to_probability({'a': 2, 'b': 2})
{'a': 0.5, 'b': 0.5}
Args:
frequency_map (dict): The dictionary to transform
decorator (function): A function to manipulate the probability
Returns:
Dictionary of ngrams to probability
"""
total = sum(frequency_map.values())
return {k: decorator(v / total) for k, v in frequency_map.items()}
def index_of_coincidence(*texts):
"""Calculate the index of coincidence for one or more ``texts``.
The results are averaged over multiple texts to return the delta index of coincidence.
Examples:
>>> index_of_coincidence("aabbc")
0.2
>>> index_of_coincidence("aabbc", "abbcc")
0.2
Args:
*texts (variable length argument list): The texts to analyze
Returns:
Decimal value of the index of coincidence
Raises:
ValueError: If texts is empty
ValueError: If any text is less that 2 character long
"""
if not texts:
raise ValueError("texts must not be empty")
return statistics.mean(_calculate_index_of_coincidence(frequency_analyze(text), len(text)) for text in texts)
def _calculate_index_of_coincidence(frequency_map, length):
"""A measure of how similar frequency_map is to the uniform distribution.
Or the probability that two letters picked randomly are alike.
"""
if length <= 1:
return 0
# We cannot error here as length can legitimiately be 1.
# Imagine a ciphertext of length 3 and a key of length 2.
# Spliting this text up and calculating the index of coincidence results in ['AC', 'B']
# IOC of B will be calcuated for the 2nd column of the key. We could represent the same
# encryption with a key of length 3 but then we encounter the same problem. This is also
# legitimiate encryption scheme we cannot ignore. Hence we have to deal with this fact here
# A value of 0 will impact the overall mean, however it does make some sense when you ask the question
# How many ways to choose 2 letters from the text, if theres only 1 letter then the answer is 0.
# Mathemtical combination, number of ways to choose 2 letters, no replacement, order doesnt matter
combination_of_letters = sum(freq * (freq - 1) for freq in frequency_map.values())
return combination_of_letters / (length * (length - 1))
def _calculate_chi_squared(source_freq, target_prob, source_len):
"""A measure of the observed frequency of the symbol versus the expected frequency.
If the value is 0 then the texts are exactly alike for that symbol.
"""
expected = source_len * target_prob
return (source_freq - expected)**2 / expected
def _load_ngram(name):
"""Dynamically import the python module with the ngram defined as a dictionary.
Since bigger ngrams are large files its wasteful to always statically import them if they're not used.
"""
module = importlib.import_module('lantern.analysis.english_ngrams.{}'.format(name))
return getattr(module, name)
english = DynamicDict({
'unigrams': lambda: _load_ngram('unigrams'),
'bigrams': lambda: _load_ngram('bigrams'),
'trigrams': lambda: _load_ngram('trigrams'),
'quadgrams': lambda: _load_ngram('quadgrams')
})
"""English ngram frequencies."""
ENGLISH_IC = _calculate_index_of_coincidence(english.unigrams, sum(english.unigrams.values()))
"""Index of coincidence for the English language."""
|
CameronLonsdale/lantern | lantern/analysis/frequency.py | _calculate_chi_squared | python | def _calculate_chi_squared(source_freq, target_prob, source_len):
expected = source_len * target_prob
return (source_freq - expected)**2 / expected | A measure of the observed frequency of the symbol versus the expected frequency.
If the value is 0 then the texts are exactly alike for that symbol. | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/analysis/frequency.py#L126-L131 | null | """General purpose frequency analysis tools."""
import importlib
import statistics
from collections import Counter
from lantern.structures import DynamicDict
from lantern.util import iterate_ngrams
def frequency_analyze(text, n=1):
"""Analyze the frequency of ngrams for a piece of text.
Examples:
>>> frequency_analyze("abb")
{'a': 1, 'b': 2}
>>> frequency_analyze("abb", 2)
{'ab': 1, 'bb': 1}
Args:
text (str): The text to analyze
n (int): The ngram size to use
Returns:
Dictionary of ngrams to frequency
Raises:
ValueError: If n is not a positive integer
"""
return Counter(iterate_ngrams(text, n))
def frequency_to_probability(frequency_map, decorator=lambda f: f):
"""Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.
Example:
>>> frequency_to_probability({'a': 2, 'b': 2})
{'a': 0.5, 'b': 0.5}
Args:
frequency_map (dict): The dictionary to transform
decorator (function): A function to manipulate the probability
Returns:
Dictionary of ngrams to probability
"""
total = sum(frequency_map.values())
return {k: decorator(v / total) for k, v in frequency_map.items()}
def index_of_coincidence(*texts):
"""Calculate the index of coincidence for one or more ``texts``.
The results are averaged over multiple texts to return the delta index of coincidence.
Examples:
>>> index_of_coincidence("aabbc")
0.2
>>> index_of_coincidence("aabbc", "abbcc")
0.2
Args:
*texts (variable length argument list): The texts to analyze
Returns:
Decimal value of the index of coincidence
Raises:
ValueError: If texts is empty
ValueError: If any text is less that 2 character long
"""
if not texts:
raise ValueError("texts must not be empty")
return statistics.mean(_calculate_index_of_coincidence(frequency_analyze(text), len(text)) for text in texts)
def _calculate_index_of_coincidence(frequency_map, length):
"""A measure of how similar frequency_map is to the uniform distribution.
Or the probability that two letters picked randomly are alike.
"""
if length <= 1:
return 0
# We cannot error here as length can legitimiately be 1.
# Imagine a ciphertext of length 3 and a key of length 2.
# Spliting this text up and calculating the index of coincidence results in ['AC', 'B']
# IOC of B will be calcuated for the 2nd column of the key. We could represent the same
# encryption with a key of length 3 but then we encounter the same problem. This is also
# legitimiate encryption scheme we cannot ignore. Hence we have to deal with this fact here
# A value of 0 will impact the overall mean, however it does make some sense when you ask the question
# How many ways to choose 2 letters from the text, if theres only 1 letter then the answer is 0.
# Mathemtical combination, number of ways to choose 2 letters, no replacement, order doesnt matter
combination_of_letters = sum(freq * (freq - 1) for freq in frequency_map.values())
return combination_of_letters / (length * (length - 1))
def chi_squared(source_frequency, target_frequency):
"""Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic
"""
# Ignore any symbols from source that are not in target.
# TODO: raise Error if source_len is 0?
target_prob = frequency_to_probability(target_frequency)
source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)
result = 0
for symbol, prob in target_prob.items():
symbol_frequency = source_frequency.get(symbol, 0) # Frequecy is 0 if it doesnt appear in source
result += _calculate_chi_squared(symbol_frequency, prob, source_len)
return result
def _load_ngram(name):
"""Dynamically import the python module with the ngram defined as a dictionary.
Since bigger ngrams are large files its wasteful to always statically import them if they're not used.
"""
module = importlib.import_module('lantern.analysis.english_ngrams.{}'.format(name))
return getattr(module, name)
english = DynamicDict({
'unigrams': lambda: _load_ngram('unigrams'),
'bigrams': lambda: _load_ngram('bigrams'),
'trigrams': lambda: _load_ngram('trigrams'),
'quadgrams': lambda: _load_ngram('quadgrams')
})
"""English ngram frequencies."""
ENGLISH_IC = _calculate_index_of_coincidence(english.unigrams, sum(english.unigrams.values()))
"""Index of coincidence for the English language."""
|
CameronLonsdale/lantern | lantern/analysis/frequency.py | _load_ngram | python | def _load_ngram(name):
module = importlib.import_module('lantern.analysis.english_ngrams.{}'.format(name))
return getattr(module, name) | Dynamically import the python module with the ngram defined as a dictionary.
Since bigger ngrams are large files its wasteful to always statically import them if they're not used. | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/analysis/frequency.py#L134-L139 | null | """General purpose frequency analysis tools."""
import importlib
import statistics
from collections import Counter
from lantern.structures import DynamicDict
from lantern.util import iterate_ngrams
def frequency_analyze(text, n=1):
"""Analyze the frequency of ngrams for a piece of text.
Examples:
>>> frequency_analyze("abb")
{'a': 1, 'b': 2}
>>> frequency_analyze("abb", 2)
{'ab': 1, 'bb': 1}
Args:
text (str): The text to analyze
n (int): The ngram size to use
Returns:
Dictionary of ngrams to frequency
Raises:
ValueError: If n is not a positive integer
"""
return Counter(iterate_ngrams(text, n))
def frequency_to_probability(frequency_map, decorator=lambda f: f):
"""Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.
Example:
>>> frequency_to_probability({'a': 2, 'b': 2})
{'a': 0.5, 'b': 0.5}
Args:
frequency_map (dict): The dictionary to transform
decorator (function): A function to manipulate the probability
Returns:
Dictionary of ngrams to probability
"""
total = sum(frequency_map.values())
return {k: decorator(v / total) for k, v in frequency_map.items()}
def index_of_coincidence(*texts):
"""Calculate the index of coincidence for one or more ``texts``.
The results are averaged over multiple texts to return the delta index of coincidence.
Examples:
>>> index_of_coincidence("aabbc")
0.2
>>> index_of_coincidence("aabbc", "abbcc")
0.2
Args:
*texts (variable length argument list): The texts to analyze
Returns:
Decimal value of the index of coincidence
Raises:
ValueError: If texts is empty
ValueError: If any text is less that 2 character long
"""
if not texts:
raise ValueError("texts must not be empty")
return statistics.mean(_calculate_index_of_coincidence(frequency_analyze(text), len(text)) for text in texts)
def _calculate_index_of_coincidence(frequency_map, length):
"""A measure of how similar frequency_map is to the uniform distribution.
Or the probability that two letters picked randomly are alike.
"""
if length <= 1:
return 0
# We cannot error here as length can legitimiately be 1.
# Imagine a ciphertext of length 3 and a key of length 2.
# Spliting this text up and calculating the index of coincidence results in ['AC', 'B']
# IOC of B will be calcuated for the 2nd column of the key. We could represent the same
# encryption with a key of length 3 but then we encounter the same problem. This is also
# legitimiate encryption scheme we cannot ignore. Hence we have to deal with this fact here
# A value of 0 will impact the overall mean, however it does make some sense when you ask the question
# How many ways to choose 2 letters from the text, if theres only 1 letter then the answer is 0.
# Mathemtical combination, number of ways to choose 2 letters, no replacement, order doesnt matter
combination_of_letters = sum(freq * (freq - 1) for freq in frequency_map.values())
return combination_of_letters / (length * (length - 1))
def chi_squared(source_frequency, target_frequency):
"""Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic
"""
# Ignore any symbols from source that are not in target.
# TODO: raise Error if source_len is 0?
target_prob = frequency_to_probability(target_frequency)
source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)
result = 0
for symbol, prob in target_prob.items():
symbol_frequency = source_frequency.get(symbol, 0) # Frequecy is 0 if it doesnt appear in source
result += _calculate_chi_squared(symbol_frequency, prob, source_len)
return result
def _calculate_chi_squared(source_freq, target_prob, source_len):
"""A measure of the observed frequency of the symbol versus the expected frequency.
If the value is 0 then the texts are exactly alike for that symbol.
"""
expected = source_len * target_prob
return (source_freq - expected)**2 / expected
english = DynamicDict({
'unigrams': lambda: _load_ngram('unigrams'),
'bigrams': lambda: _load_ngram('bigrams'),
'trigrams': lambda: _load_ngram('trigrams'),
'quadgrams': lambda: _load_ngram('quadgrams')
})
"""English ngram frequencies."""
ENGLISH_IC = _calculate_index_of_coincidence(english.unigrams, sum(english.unigrams.values()))
"""Index of coincidence for the English language."""
|
CameronLonsdale/lantern | lantern/score.py | score | python | def score(text, *score_functions):
if not score_functions:
raise ValueError("score_functions must not be empty")
return statistics.mean(func(text) for func in score_functions) | Score ``text`` using ``score_functions``.
Examples:
>>> score("abc", function_a)
>>> score("abc", function_a, function_b)
Args:
text (str): The text to score
*score_functions (variable length argument list): functions to score with
Returns:
Arithmetic mean of scores
Raises:
ValueError: If score_functions is empty | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/score.py#L9-L29 | null | """
Scoring algorithm to return probability of correct decryption.
Output range depends on the score functions used.
"""
import statistics
|
CameronLonsdale/lantern | lantern/fitness/patternmatch.py | PatternMatch | python | def PatternMatch(regex):
pattern = re.compile(regex)
return lambda text: -1 if pattern.search(text) is None else 0 | Compute the score of a text by determing if a pattern matches.
Example:
>>> fitness = PatternMatch("flag{.*}")
>>> fitness("flag{example}")
0
>>> fitness("junk")
-1
Args:
regex (str): regular expression string to use as a pattern | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/fitness/patternmatch.py#L6-L21 | null | """Fitness scoring using pattern matching."""
import re
|
CameronLonsdale/lantern | lantern/util.py | remove | python | def remove(text, exclude):
exclude = ''.join(str(symbol) for symbol in exclude)
return text.translate(str.maketrans('', '', exclude)) | Remove ``exclude`` symbols from ``text``.
Example:
>>> remove("example text", string.whitespace)
'exampletext'
Args:
text (str): The text to modify
exclude (iterable): The symbols to exclude
Returns:
``text`` with ``exclude`` symbols removed | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/util.py#L6-L21 | null | """Utility functions to format and marshal data."""
import itertools
def split_columns(text, n_columns):
"""Split ``text`` into ``n_columns`` many columns.
Example:
>>> split_columns("example", 2)
['eape', 'xml']
Args:
text (str): The text to split
n_columns (int): The number of columns to create
Returns:
List of columns
Raises:
ValueError: If n_cols is <= 0 or >= len(text)
"""
if n_columns <= 0 or n_columns > len(text):
raise ValueError("n_columns must be within the bounds of 1 and text length")
return [text[i::n_columns] for i in range(n_columns)]
def combine_columns(columns):
"""Combine ``columns`` into a single string.
Example:
>>> combine_columns(['eape', 'xml'])
'example'
Args:
columns (iterable): ordered columns to combine
Returns:
String of combined columns
"""
columns_zipped = itertools.zip_longest(*columns)
return ''.join(x for zipped in columns_zipped for x in zipped if x)
def iterate_ngrams(text, n):
"""Generator to yield ngrams in ``text``.
Example:
>>> for ngram in iterate_ngrams("example", 4):
... print(ngram)
exam
xamp
ampl
mple
Args:
text (str): text to iterate over
n (int): size of window for iteration
Returns:
Generator expression to yield the next ngram in the text
Raises:
ValueError: If n is non positive
"""
if n <= 0:
raise ValueError("n must be a positive integer")
return [text[i: i + n] for i in range(len(text) - n + 1)]
def group(text, size):
"""Group ``text`` into blocks of ``size``.
Example:
>>> group("test", 2)
['te', 'st']
Args:
text (str): text to separate
size (int): size of groups to split the text into
Returns:
List of n-sized groups of text
Raises:
ValueError: If n is non positive
"""
if size <= 0:
raise ValueError("n must be a positive integer")
return [text[i:i + size] for i in range(0, len(text), size)]
|
CameronLonsdale/lantern | lantern/util.py | split_columns | python | def split_columns(text, n_columns):
if n_columns <= 0 or n_columns > len(text):
raise ValueError("n_columns must be within the bounds of 1 and text length")
return [text[i::n_columns] for i in range(n_columns)] | Split ``text`` into ``n_columns`` many columns.
Example:
>>> split_columns("example", 2)
['eape', 'xml']
Args:
text (str): The text to split
n_columns (int): The number of columns to create
Returns:
List of columns
Raises:
ValueError: If n_cols is <= 0 or >= len(text) | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/util.py#L24-L44 | null | """Utility functions to format and marshal data."""
import itertools
def remove(text, exclude):
"""Remove ``exclude`` symbols from ``text``.
Example:
>>> remove("example text", string.whitespace)
'exampletext'
Args:
text (str): The text to modify
exclude (iterable): The symbols to exclude
Returns:
``text`` with ``exclude`` symbols removed
"""
exclude = ''.join(str(symbol) for symbol in exclude)
return text.translate(str.maketrans('', '', exclude))
def combine_columns(columns):
"""Combine ``columns`` into a single string.
Example:
>>> combine_columns(['eape', 'xml'])
'example'
Args:
columns (iterable): ordered columns to combine
Returns:
String of combined columns
"""
columns_zipped = itertools.zip_longest(*columns)
return ''.join(x for zipped in columns_zipped for x in zipped if x)
def iterate_ngrams(text, n):
"""Generator to yield ngrams in ``text``.
Example:
>>> for ngram in iterate_ngrams("example", 4):
... print(ngram)
exam
xamp
ampl
mple
Args:
text (str): text to iterate over
n (int): size of window for iteration
Returns:
Generator expression to yield the next ngram in the text
Raises:
ValueError: If n is non positive
"""
if n <= 0:
raise ValueError("n must be a positive integer")
return [text[i: i + n] for i in range(len(text) - n + 1)]
def group(text, size):
"""Group ``text`` into blocks of ``size``.
Example:
>>> group("test", 2)
['te', 'st']
Args:
text (str): text to separate
size (int): size of groups to split the text into
Returns:
List of n-sized groups of text
Raises:
ValueError: If n is non positive
"""
if size <= 0:
raise ValueError("n must be a positive integer")
return [text[i:i + size] for i in range(0, len(text), size)]
|
CameronLonsdale/lantern | lantern/util.py | combine_columns | python | def combine_columns(columns):
columns_zipped = itertools.zip_longest(*columns)
return ''.join(x for zipped in columns_zipped for x in zipped if x) | Combine ``columns`` into a single string.
Example:
>>> combine_columns(['eape', 'xml'])
'example'
Args:
columns (iterable): ordered columns to combine
Returns:
String of combined columns | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/util.py#L47-L61 | null | """Utility functions to format and marshal data."""
import itertools
def remove(text, exclude):
"""Remove ``exclude`` symbols from ``text``.
Example:
>>> remove("example text", string.whitespace)
'exampletext'
Args:
text (str): The text to modify
exclude (iterable): The symbols to exclude
Returns:
``text`` with ``exclude`` symbols removed
"""
exclude = ''.join(str(symbol) for symbol in exclude)
return text.translate(str.maketrans('', '', exclude))
def split_columns(text, n_columns):
"""Split ``text`` into ``n_columns`` many columns.
Example:
>>> split_columns("example", 2)
['eape', 'xml']
Args:
text (str): The text to split
n_columns (int): The number of columns to create
Returns:
List of columns
Raises:
ValueError: If n_cols is <= 0 or >= len(text)
"""
if n_columns <= 0 or n_columns > len(text):
raise ValueError("n_columns must be within the bounds of 1 and text length")
return [text[i::n_columns] for i in range(n_columns)]
def iterate_ngrams(text, n):
"""Generator to yield ngrams in ``text``.
Example:
>>> for ngram in iterate_ngrams("example", 4):
... print(ngram)
exam
xamp
ampl
mple
Args:
text (str): text to iterate over
n (int): size of window for iteration
Returns:
Generator expression to yield the next ngram in the text
Raises:
ValueError: If n is non positive
"""
if n <= 0:
raise ValueError("n must be a positive integer")
return [text[i: i + n] for i in range(len(text) - n + 1)]
def group(text, size):
"""Group ``text`` into blocks of ``size``.
Example:
>>> group("test", 2)
['te', 'st']
Args:
text (str): text to separate
size (int): size of groups to split the text into
Returns:
List of n-sized groups of text
Raises:
ValueError: If n is non positive
"""
if size <= 0:
raise ValueError("n must be a positive integer")
return [text[i:i + size] for i in range(0, len(text), size)]
|
CameronLonsdale/lantern | lantern/util.py | iterate_ngrams | python | def iterate_ngrams(text, n):
if n <= 0:
raise ValueError("n must be a positive integer")
return [text[i: i + n] for i in range(len(text) - n + 1)] | Generator to yield ngrams in ``text``.
Example:
>>> for ngram in iterate_ngrams("example", 4):
... print(ngram)
exam
xamp
ampl
mple
Args:
text (str): text to iterate over
n (int): size of window for iteration
Returns:
Generator expression to yield the next ngram in the text
Raises:
ValueError: If n is non positive | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/util.py#L64-L88 | null | """Utility functions to format and marshal data."""
import itertools
def remove(text, exclude):
"""Remove ``exclude`` symbols from ``text``.
Example:
>>> remove("example text", string.whitespace)
'exampletext'
Args:
text (str): The text to modify
exclude (iterable): The symbols to exclude
Returns:
``text`` with ``exclude`` symbols removed
"""
exclude = ''.join(str(symbol) for symbol in exclude)
return text.translate(str.maketrans('', '', exclude))
def split_columns(text, n_columns):
"""Split ``text`` into ``n_columns`` many columns.
Example:
>>> split_columns("example", 2)
['eape', 'xml']
Args:
text (str): The text to split
n_columns (int): The number of columns to create
Returns:
List of columns
Raises:
ValueError: If n_cols is <= 0 or >= len(text)
"""
if n_columns <= 0 or n_columns > len(text):
raise ValueError("n_columns must be within the bounds of 1 and text length")
return [text[i::n_columns] for i in range(n_columns)]
def combine_columns(columns):
"""Combine ``columns`` into a single string.
Example:
>>> combine_columns(['eape', 'xml'])
'example'
Args:
columns (iterable): ordered columns to combine
Returns:
String of combined columns
"""
columns_zipped = itertools.zip_longest(*columns)
return ''.join(x for zipped in columns_zipped for x in zipped if x)
def group(text, size):
"""Group ``text`` into blocks of ``size``.
Example:
>>> group("test", 2)
['te', 'st']
Args:
text (str): text to separate
size (int): size of groups to split the text into
Returns:
List of n-sized groups of text
Raises:
ValueError: If n is non positive
"""
if size <= 0:
raise ValueError("n must be a positive integer")
return [text[i:i + size] for i in range(0, len(text), size)]
|
CameronLonsdale/lantern | lantern/util.py | group | python | def group(text, size):
if size <= 0:
raise ValueError("n must be a positive integer")
return [text[i:i + size] for i in range(0, len(text), size)] | Group ``text`` into blocks of ``size``.
Example:
>>> group("test", 2)
['te', 'st']
Args:
text (str): text to separate
size (int): size of groups to split the text into
Returns:
List of n-sized groups of text
Raises:
ValueError: If n is non positive | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/util.py#L91-L111 | null | """Utility functions to format and marshal data."""
import itertools
def remove(text, exclude):
"""Remove ``exclude`` symbols from ``text``.
Example:
>>> remove("example text", string.whitespace)
'exampletext'
Args:
text (str): The text to modify
exclude (iterable): The symbols to exclude
Returns:
``text`` with ``exclude`` symbols removed
"""
exclude = ''.join(str(symbol) for symbol in exclude)
return text.translate(str.maketrans('', '', exclude))
def split_columns(text, n_columns):
"""Split ``text`` into ``n_columns`` many columns.
Example:
>>> split_columns("example", 2)
['eape', 'xml']
Args:
text (str): The text to split
n_columns (int): The number of columns to create
Returns:
List of columns
Raises:
ValueError: If n_cols is <= 0 or >= len(text)
"""
if n_columns <= 0 or n_columns > len(text):
raise ValueError("n_columns must be within the bounds of 1 and text length")
return [text[i::n_columns] for i in range(n_columns)]
def combine_columns(columns):
"""Combine ``columns`` into a single string.
Example:
>>> combine_columns(['eape', 'xml'])
'example'
Args:
columns (iterable): ordered columns to combine
Returns:
String of combined columns
"""
columns_zipped = itertools.zip_longest(*columns)
return ''.join(x for zipped in columns_zipped for x in zipped if x)
def iterate_ngrams(text, n):
"""Generator to yield ngrams in ``text``.
Example:
>>> for ngram in iterate_ngrams("example", 4):
... print(ngram)
exam
xamp
ampl
mple
Args:
text (str): text to iterate over
n (int): size of window for iteration
Returns:
Generator expression to yield the next ngram in the text
Raises:
ValueError: If n is non positive
"""
if n <= 0:
raise ValueError("n must be a positive integer")
return [text[i: i + n] for i in range(len(text) - n + 1)]
|
CameronLonsdale/lantern | lantern/fitness/chisquared.py | ChiSquared | python | def ChiSquared(target_frequency):
def inner(text):
text = ''.join(text)
return -chi_squared(frequency_analyze(text), target_frequency)
return inner | Score a text by comparing its frequency distribution against another.
Note:
It is easy to be penalised without knowing it when using this scorer.
English frequency ngrams are capital letters, meaning when using it
any text you score against must be all capitals for it to give correct results.
I am aware of the issue and will work on a fix.
Todo:
Maybe include paramter for ngram size. Havent had a use case for this yet.
Once there is evidence it is needed, I will add it.
Example:
>>> fitness = ChiSquared(english.unigrams)
>>> fitness("ABC")
-32.2
Args:
target_frequency (dict): symbol to frequency mapping of the distribution to compare with | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/fitness/chisquared.py#L6-L31 | null | """Chi Squared Scoring function."""
from lantern.analysis.frequency import frequency_analyze, chi_squared
|
CameronLonsdale/lantern | lantern/modules/simplesubstitution.py | crack | python | def crack(ciphertext, *fitness_functions, ntrials=30, nswaps=3000):
if ntrials <= 0 or nswaps <= 0:
raise ValueError("ntrials and nswaps must be positive integers")
# Find a local maximum by swapping two letters and scoring the decryption
def next_node_inner_climb(node):
# Swap 2 characters in the key
a, b = random.sample(range(len(node)), 2)
node[a], node[b] = node[b], node[a]
plaintext = decrypt(node, ciphertext)
node_score = score(plaintext, *fitness_functions)
return node, node_score, Decryption(plaintext, ''.join(node), node_score)
# Outer climb rereuns hill climb ntrials number of times each time at a different start location
def next_node_outer_climb(node):
random.shuffle(node)
key, best_score, outputs = hill_climb(nswaps, node[:], next_node_inner_climb)
return key, best_score, outputs[-1] # The last item in this list is the item with the highest score
_, _, decryptions = hill_climb(ntrials, list(string.ascii_uppercase), next_node_outer_climb)
return sorted(decryptions, reverse=True) | Break ``ciphertext`` using hill climbing.
Note:
Currently ntrails and nswaps default to magic numbers.
Generally the trend is, the longer the text, the lower the number of trials
you need to run, because the hill climbing will lead to the best answer faster.
Because randomness is involved, there is the possibility of the correct decryption
not being found. In this circumstance you just need to run the code again.
Example:
>>> decryptions = crack("XUOOB", fitness.english.quadgrams)
>>> print(decryptions[0])
HELLO
Args:
ciphertext (str): The text to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
ntrials (int): The number of times to run the hill climbing algorithm
nswaps (int): The number of rounds to find a local maximum
Returns:
Sorted list of decryptions
Raises:
ValueError: If nswaps or ntrails are not positive integers
ValueError: If no fitness_functions are given | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/simplesubstitution.py#L11-L60 | [
"def hill_climb(nsteps, start_node, get_next_node):\n \"\"\"Modular hill climbing algorithm.\n\n Example:\n >>> def get_next_node(node):\n ... a, b = random.sample(range(len(node)), 2)\n ... node[a], node[b] = node[b], node[a]\n ... plaintext = decrypt(node, ciphertext)... | """Automated breaking of the Simple Substitution Cipher."""
import random
import string
from lantern import score
from lantern.analysis.search import hill_climb
from lantern.structures import Decryption
# We sort the list to ensure the best results are at the front of the list
def decrypt(key, ciphertext):
"""Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``.
Example:
>>> decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB")
HELLO
Args:
key (iterable): The key to use
ciphertext (str): The text to decrypt
Returns:
Decrypted ciphertext
"""
# TODO: Is it worth keeping this here I should I only accept strings?
key = ''.join(key)
alphabet = string.ascii_letters
cipher_alphabet = key.lower() + key.upper()
return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet))
|
CameronLonsdale/lantern | lantern/modules/simplesubstitution.py | decrypt | python | def decrypt(key, ciphertext):
# TODO: Is it worth keeping this here I should I only accept strings?
key = ''.join(key)
alphabet = string.ascii_letters
cipher_alphabet = key.lower() + key.upper()
return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet)) | Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``.
Example:
>>> decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB")
HELLO
Args:
key (iterable): The key to use
ciphertext (str): The text to decrypt
Returns:
Decrypted ciphertext | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/simplesubstitution.py#L63-L81 | null | """Automated breaking of the Simple Substitution Cipher."""
import random
import string
from lantern import score
from lantern.analysis.search import hill_climb
from lantern.structures import Decryption
def crack(ciphertext, *fitness_functions, ntrials=30, nswaps=3000):
"""Break ``ciphertext`` using hill climbing.
Note:
Currently ntrails and nswaps default to magic numbers.
Generally the trend is, the longer the text, the lower the number of trials
you need to run, because the hill climbing will lead to the best answer faster.
Because randomness is involved, there is the possibility of the correct decryption
not being found. In this circumstance you just need to run the code again.
Example:
>>> decryptions = crack("XUOOB", fitness.english.quadgrams)
>>> print(decryptions[0])
HELLO
Args:
ciphertext (str): The text to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
ntrials (int): The number of times to run the hill climbing algorithm
nswaps (int): The number of rounds to find a local maximum
Returns:
Sorted list of decryptions
Raises:
ValueError: If nswaps or ntrails are not positive integers
ValueError: If no fitness_functions are given
"""
if ntrials <= 0 or nswaps <= 0:
raise ValueError("ntrials and nswaps must be positive integers")
# Find a local maximum by swapping two letters and scoring the decryption
def next_node_inner_climb(node):
# Swap 2 characters in the key
a, b = random.sample(range(len(node)), 2)
node[a], node[b] = node[b], node[a]
plaintext = decrypt(node, ciphertext)
node_score = score(plaintext, *fitness_functions)
return node, node_score, Decryption(plaintext, ''.join(node), node_score)
# Outer climb rereuns hill climb ntrials number of times each time at a different start location
def next_node_outer_climb(node):
random.shuffle(node)
key, best_score, outputs = hill_climb(nswaps, node[:], next_node_inner_climb)
return key, best_score, outputs[-1] # The last item in this list is the item with the highest score
_, _, decryptions = hill_climb(ntrials, list(string.ascii_uppercase), next_node_outer_climb)
return sorted(decryptions, reverse=True) # We sort the list to ensure the best results are at the front of the list
|
CameronLonsdale/lantern | lantern/fitness/ngram.py | NgramScorer | python | def NgramScorer(frequency_map):
# Calculate the log probability
length = len(next(iter(frequency_map)))
# TODO: 0.01 is a magic number. Needs to be better than that.
floor = math.log10(0.01 / sum(frequency_map.values()))
ngrams = frequency.frequency_to_probability(frequency_map, decorator=math.log10)
def inner(text):
# I dont like this, it is only for the .upper() to work,
# But I feel as though this can be removed in later refactoring
text = ''.join(text)
text = remove(text.upper(), string.whitespace + string.punctuation)
return sum(ngrams.get(ngram, floor) for ngram in iterate_ngrams(text, length))
return inner | Compute the score of a text by using the frequencies of ngrams.
Example:
>>> fitness = NgramScorer(english.unigrams)
>>> fitness("ABC")
-4.3622319742618245
Args:
frequency_map (dict): ngram to frequency mapping | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/fitness/ngram.py#L11-L35 | [
"def frequency_to_probability(frequency_map, decorator=lambda f: f):\n \"\"\"Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.\n\n Example:\n >>> frequency_to_probability({'a': 2, 'b': 2})\n {'a': 0.5, 'b': 0.5}\n\n Args:\n frequency... | """Fitness scoring using ngram frequency."""
import math
import string
from lantern.analysis import frequency
from lantern.structures import DynamicDict
from lantern.util import remove, iterate_ngrams
english = DynamicDict({
'unigrams': lambda: NgramScorer(frequency.english.unigrams),
'bigrams': lambda: NgramScorer(frequency.english.bigrams),
'trigrams': lambda: NgramScorer(frequency.english.trigrams),
'quadgrams': lambda: NgramScorer(frequency.english.quadgrams)
})
"""English ngram scorers."""
|
CameronLonsdale/lantern | lantern/modules/vigenere.py | crack | python | def crack(ciphertext, *fitness_functions, key_period=None, max_key_period=30):
if max_key_period <= 0 or (key_period is not None and key_period <= 0):
raise ValueError("Period values must be positive integers")
original_text = ciphertext
# Make the assumption that non alphabet characters have not been encrypted
# TODO: This is fairly poor code. Once languages are a thing, there should be some nice abstractions for this stuff
ciphertext = remove(ciphertext, string.punctuation + string.whitespace + string.digits)
periods = [int(key_period)] if key_period else key_periods(ciphertext, max_key_period)
# Decrypt for every valid period
period_decryptions = []
for period in periods:
if period >= len(ciphertext):
continue
# Collect the best decryptions for every column
column_decryptions = [shift.crack(col, *fitness_functions)[0] for col in split_columns(ciphertext, period)]
key = _build_key(decrypt.key for decrypt in column_decryptions)
plaintext = decrypt(key, original_text)
period_decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))
return sorted(period_decryptions, reverse=True) | Break ``ciphertext`` by finding (or using the given) key_period then breaking ``key_period`` many Caesar ciphers.
Example:
>>> decryptions = crack("OMSTV", fitness.ChiSquared(analysis.frequency.english.unigrams))
>>> print(decryptions[0])
HELLO
Args:
ciphertext (str): The text to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
key_period (int): The period of the key
max_key_period (int): The maximum period the key could be
Returns:
Sorted list of decryptions
Raises:
ValueError: If key_period or max_key_period are less than or equal to 0
ValueError: If no fitness_functions are given | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/vigenere.py#L15-L60 | [
"def remove(text, exclude):\n \"\"\"Remove ``exclude`` symbols from ``text``.\n\n Example:\n >>> remove(\"example text\", string.whitespace)\n 'exampletext'\n\n Args:\n text (str): The text to modify\n exclude (iterable): The symbols to exclude\n\n Returns:\n ``text`` ... | """Automated breaking of the Vigenere Cipher."""
import string
from lantern import score
from lantern.modules import shift
from lantern.structures import Decryption
from lantern.analysis.frequency import index_of_coincidence, ENGLISH_IC
from lantern.util import split_columns, remove
# TODO: maybe add finding keyperiods as a parameter because people might want to use kasiski
# TODO: Maybe consider splitting the scoring functions for columns and the whole
# Name should be different?, say youre finding key periods through IC.
def key_periods(ciphertext, max_key_period):
"""Rank all key periods for ``ciphertext`` up to and including ``max_key_period``
Example:
>>> key_periods(ciphertext, 30)
[2, 4, 8, 3, ...]
Args:
ciphertext (str): The text to analyze
max_key_period (int): The maximum period the key could be
Returns:
Sorted list of keys
Raises:
ValueError: If max_key_period is less than or equal to 0
"""
if max_key_period <= 0:
raise ValueError("max_key_period must be a positive integer")
key_scores = []
for period in range(1, min(max_key_period, len(ciphertext)) + 1):
score = abs(ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period)))
key_scores.append((period, score))
return [p[0] for p in sorted(key_scores, key=lambda x: x[1])]
def _build_key(keys):
num_letters = len(string.ascii_uppercase)
return ''.join(string.ascii_uppercase[(key) % num_letters] for key in keys)
def decrypt(key, ciphertext):
"""Decrypt Vigenere encrypted ``ciphertext`` using ``key``.
Example:
>>> decrypt("KEY", "RIJVS")
HELLO
Args:
key (iterable): The key to use
ciphertext (str): The text to decrypt
Returns:
Decrypted ciphertext
"""
index = 0
decrypted = ""
for char in ciphertext:
if char in string.punctuation + string.whitespace + string.digits:
decrypted += char
continue # Not part of the decryption
# Rotate character by the alphabet position of the letter in the key
alphabet = string.ascii_uppercase if key[index].isupper() else string.ascii_lowercase
decrypted += ''.join(shift.decrypt(int(alphabet.index(key[index])), char))
index = (index + 1) % len(key)
return decrypted
|
CameronLonsdale/lantern | lantern/modules/vigenere.py | key_periods | python | def key_periods(ciphertext, max_key_period):
if max_key_period <= 0:
raise ValueError("max_key_period must be a positive integer")
key_scores = []
for period in range(1, min(max_key_period, len(ciphertext)) + 1):
score = abs(ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period)))
key_scores.append((period, score))
return [p[0] for p in sorted(key_scores, key=lambda x: x[1])] | Rank all key periods for ``ciphertext`` up to and including ``max_key_period``
Example:
>>> key_periods(ciphertext, 30)
[2, 4, 8, 3, ...]
Args:
ciphertext (str): The text to analyze
max_key_period (int): The maximum period the key could be
Returns:
Sorted list of keys
Raises:
ValueError: If max_key_period is less than or equal to 0 | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/vigenere.py#L64-L89 | [
"def index_of_coincidence(*texts):\n \"\"\"Calculate the index of coincidence for one or more ``texts``.\n The results are averaged over multiple texts to return the delta index of coincidence.\n\n Examples:\n >>> index_of_coincidence(\"aabbc\")\n 0.2\n\n >>> index_of_coincidence(\"aab... | """Automated breaking of the Vigenere Cipher."""
import string
from lantern import score
from lantern.modules import shift
from lantern.structures import Decryption
from lantern.analysis.frequency import index_of_coincidence, ENGLISH_IC
from lantern.util import split_columns, remove
# TODO: maybe add finding keyperiods as a parameter because people might want to use kasiski
# TODO: Maybe consider splitting the scoring functions for columns and the whole
def crack(ciphertext, *fitness_functions, key_period=None, max_key_period=30):
"""Break ``ciphertext`` by finding (or using the given) key_period then breaking ``key_period`` many Caesar ciphers.
Example:
>>> decryptions = crack("OMSTV", fitness.ChiSquared(analysis.frequency.english.unigrams))
>>> print(decryptions[0])
HELLO
Args:
ciphertext (str): The text to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
key_period (int): The period of the key
max_key_period (int): The maximum period the key could be
Returns:
Sorted list of decryptions
Raises:
ValueError: If key_period or max_key_period are less than or equal to 0
ValueError: If no fitness_functions are given
"""
if max_key_period <= 0 or (key_period is not None and key_period <= 0):
raise ValueError("Period values must be positive integers")
original_text = ciphertext
# Make the assumption that non alphabet characters have not been encrypted
# TODO: This is fairly poor code. Once languages are a thing, there should be some nice abstractions for this stuff
ciphertext = remove(ciphertext, string.punctuation + string.whitespace + string.digits)
periods = [int(key_period)] if key_period else key_periods(ciphertext, max_key_period)
# Decrypt for every valid period
period_decryptions = []
for period in periods:
if period >= len(ciphertext):
continue
# Collect the best decryptions for every column
column_decryptions = [shift.crack(col, *fitness_functions)[0] for col in split_columns(ciphertext, period)]
key = _build_key(decrypt.key for decrypt in column_decryptions)
plaintext = decrypt(key, original_text)
period_decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))
return sorted(period_decryptions, reverse=True)
# Name should be different?, say youre finding key periods through IC.
def _build_key(keys):
num_letters = len(string.ascii_uppercase)
return ''.join(string.ascii_uppercase[(key) % num_letters] for key in keys)
def decrypt(key, ciphertext):
    """Decrypt Vigenere encrypted ``ciphertext`` using ``key``.

    Example:
        >>> decrypt("KEY", "RIJVS")
        HELLO

    Args:
        key (iterable): The key to use
        ciphertext (str): The text to decrypt

    Returns:
        Decrypted ciphertext
    """
    # Hoisted out of the loop: this concatenation is loop-invariant
    unencrypted_chars = string.punctuation + string.whitespace + string.digits

    index = 0
    decrypted = ""
    for char in ciphertext:
        if char in unencrypted_chars:
            decrypted += char
            continue  # Not part of the decryption

        # Rotate character by the alphabet position of the letter in the key;
        # the key letter's own case selects which alphabet to index into.
        alphabet = string.ascii_uppercase if key[index].isupper() else string.ascii_lowercase
        # str.index already returns an int, so the former int() wrapper was redundant
        decrypted += ''.join(shift.decrypt(alphabet.index(key[index]), char))
        index = (index + 1) % len(key)

    return decrypted
|
CameronLonsdale/lantern | lantern/modules/vigenere.py | decrypt | python | def decrypt(key, ciphertext):
index = 0
decrypted = ""
for char in ciphertext:
if char in string.punctuation + string.whitespace + string.digits:
decrypted += char
continue # Not part of the decryption
# Rotate character by the alphabet position of the letter in the key
alphabet = string.ascii_uppercase if key[index].isupper() else string.ascii_lowercase
decrypted += ''.join(shift.decrypt(int(alphabet.index(key[index])), char))
index = (index + 1) % len(key)
return decrypted | Decrypt Vigenere encrypted ``ciphertext`` using ``key``.
Example:
>>> decrypt("KEY", "RIJVS")
HELLO
Args:
key (iterable): The key to use
ciphertext (str): The text to decrypt
Returns:
Decrypted ciphertext | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/vigenere.py#L97-L123 | [
"def decrypt(key, ciphertext, shift_function=shift_case_english):\n \"\"\"Decrypt Shift enciphered ``ciphertext`` using ``key``.\n\n Examples:\n >>> ''.join(decrypt(3, \"KHOOR\"))\n HELLO\n\n >> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes)\n [0xde, 0xad, 0xbe, 0xef]\n\n Ar... | """Automated breaking of the Vigenere Cipher."""
import string
from lantern import score
from lantern.modules import shift
from lantern.structures import Decryption
from lantern.analysis.frequency import index_of_coincidence, ENGLISH_IC
from lantern.util import split_columns, remove
# TODO: maybe add finding keyperiods as a parameter because people might want to use kasiski
# TODO: Maybe consider splitting the scoring functions for columns and the whole
def crack(ciphertext, *fitness_functions, key_period=None, max_key_period=30):
"""Break ``ciphertext`` by finding (or using the given) key_period then breaking ``key_period`` many Caesar ciphers.
Example:
>>> decryptions = crack("OMSTV", fitness.ChiSquared(analysis.frequency.english.unigrams))
>>> print(decryptions[0])
HELLO
Args:
ciphertext (str): The text to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
key_period (int): The period of the key
max_key_period (int): The maximum period the key could be
Returns:
Sorted list of decryptions
Raises:
ValueError: If key_period or max_key_period are less than or equal to 0
ValueError: If no fitness_functions are given
"""
if max_key_period <= 0 or (key_period is not None and key_period <= 0):
raise ValueError("Period values must be positive integers")
original_text = ciphertext
# Make the assumption that non alphabet characters have not been encrypted
# TODO: This is fairly poor code. Once languages are a thing, there should be some nice abstractions for this stuff
ciphertext = remove(ciphertext, string.punctuation + string.whitespace + string.digits)
periods = [int(key_period)] if key_period else key_periods(ciphertext, max_key_period)
# Decrypt for every valid period
period_decryptions = []
for period in periods:
if period >= len(ciphertext):
continue
# Collect the best decryptions for every column
column_decryptions = [shift.crack(col, *fitness_functions)[0] for col in split_columns(ciphertext, period)]
key = _build_key(decrypt.key for decrypt in column_decryptions)
plaintext = decrypt(key, original_text)
period_decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))
return sorted(period_decryptions, reverse=True)
# Name should be different?, say youre finding key periods through IC.
def key_periods(ciphertext, max_key_period):
    """Rank all key periods for ``ciphertext`` up to and including ``max_key_period``

    Example:
        >>> key_periods(ciphertext, 30)
        [2, 4, 8, 3, ...]

    Args:
        ciphertext (str): The text to analyze
        max_key_period (int): The maximum period the key could be

    Returns:
        Sorted list of keys

    Raises:
        ValueError: If max_key_period is less than or equal to 0
    """
    if max_key_period <= 0:
        raise ValueError("max_key_period must be a positive integer")

    key_scores = []
    for period in range(1, min(max_key_period, len(ciphertext)) + 1):
        # Distance between English's expected IC and the IC measured over the
        # columns — smaller means this period is more likely the key length.
        # FIX: renamed from ``score`` to stop shadowing the imported lantern.score.
        ic_distance = abs(ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period)))
        key_scores.append((period, ic_distance))

    # Most likely (smallest distance) periods first
    return [entry[0] for entry in sorted(key_scores, key=lambda x: x[1])]
def _build_key(keys):
num_letters = len(string.ascii_uppercase)
return ''.join(string.ascii_uppercase[(key) % num_letters] for key in keys)
|
CameronLonsdale/lantern | lantern/analysis/search.py | hill_climb | python | def hill_climb(nsteps, start_node, get_next_node):
outputs = []
best_score = -float('inf')
for step in range(nsteps):
next_node, score, output = get_next_node(copy.deepcopy(start_node))
# Keep track of best score and the start node becomes finish node
if score > best_score:
start_node = copy.deepcopy(next_node)
best_score = score
outputs.append(output)
return start_node, best_score, outputs | Modular hill climbing algorithm.
Example:
>>> def get_next_node(node):
... a, b = random.sample(range(len(node)), 2)
... node[a], node[b] = node[b], node[a]
... plaintext = decrypt(node, ciphertext)
... score = lantern.score(plaintext, *fitness_functions)
... return node, score, Decryption(plaintext, ''.join(node), score)
>>> final_node, best_score, outputs = hill_climb(10, "ABC", get_next_node)
Args:
nsteps (int): The number of neighbours to visit
start_node: The starting node
get_next_node (function): Function to return the next node
the score of the current node and any optional output from the current node
Returns:
The highest node found, the score of this node and the outputs from the best nodes along the way | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/analysis/search.py#L6-L39 | [
"def next_node_outer_climb(node):\n random.shuffle(node)\n key, best_score, outputs = hill_climb(nswaps, node[:], next_node_inner_climb)\n return key, best_score, outputs[-1] # The last item in this list is the item with the highest score\n",
"def get_next_node(node):\n a, b = random.sample(range(len... | """Algorithms for searching and optimisation."""
import copy
|
CameronLonsdale/lantern | lantern/modules/shift.py | make_shift_function | python | def make_shift_function(alphabet):
def shift_case_sensitive(shift, symbol):
case = [case for case in alphabet if symbol in case]
if not case:
return symbol
case = case[0]
index = case.index(symbol)
return case[(index - shift) % len(case)]
return shift_case_sensitive | Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol) | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/shift.py#L9-L43 | null | """Automated breaking of the Shift Cipher."""
import string
from lantern import score
from lantern.structures import Decryption
shift_case_english = make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english):
"""Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``.
Example:
>>> decryptions = crack("KHOOR", fitness.english.quadgrams)
>>> print(''.join(decryptions[0].plaintext))
HELLO
Args:
ciphertext (iterable): The symbols to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
min_key (int): Key to start with
max_key (int): Key to stop at (exclusive)
shift_function (function(shift, symbol)): Shift function to use
Returns:
Sorted list of decryptions
Raises:
ValueError: If min_key exceeds max_key
ValueError: If no fitness_functions are given
"""
if min_key >= max_key:
raise ValueError("min_key cannot exceed max_key")
decryptions = []
for key in range(min_key, max_key):
plaintext = decrypt(key, ciphertext, shift_function=shift_function)
decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))
return sorted(decryptions, reverse=True)
def decrypt(key, ciphertext, shift_function=shift_case_english):
"""Decrypt Shift enciphered ``ciphertext`` using ``key``.
Examples:
>>> ''.join(decrypt(3, "KHOOR"))
HELLO
>> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes)
[0xde, 0xad, 0xbe, 0xef]
Args:
key (int): The shift to use
ciphertext (iterable): The symbols to decrypt
shift_function (function (shift, symbol)): Shift function to apply to symbols in the ciphertext
Returns:
Decrypted ciphertext, list of plaintext symbols
"""
return [shift_function(key, symbol) for symbol in ciphertext]
|
CameronLonsdale/lantern | lantern/modules/shift.py | crack | python | def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english):
if min_key >= max_key:
raise ValueError("min_key cannot exceed max_key")
decryptions = []
for key in range(min_key, max_key):
plaintext = decrypt(key, ciphertext, shift_function=shift_function)
decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))
return sorted(decryptions, reverse=True) | Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``.
Example:
>>> decryptions = crack("KHOOR", fitness.english.quadgrams)
>>> print(''.join(decryptions[0].plaintext))
HELLO
Args:
ciphertext (iterable): The symbols to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
min_key (int): Key to start with
max_key (int): Key to stop at (exclusive)
shift_function (function(shift, symbol)): Shift function to use
Returns:
Sorted list of decryptions
Raises:
ValueError: If min_key exceeds max_key
ValueError: If no fitness_functions are given | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/shift.py#L48-L80 | [
"def score(text, *score_functions):\n \"\"\"Score ``text`` using ``score_functions``.\n\n Examples:\n >>> score(\"abc\", function_a)\n >>> score(\"abc\", function_a, function_b)\n\n Args:\n text (str): The text to score\n *score_functions (variable length argument list): functio... | """Automated breaking of the Shift Cipher."""
import string
from lantern import score
from lantern.structures import Decryption
def make_shift_function(alphabet):
    """Construct a shift function from an alphabet.

    Examples:
        Shift cases independently

        >>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
        <function make_shift_function.<locals>.shift_case_sensitive>

        Additionally shift punctuation characters

        >>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
        <function make_shift_function.<locals>.shift_case_sensitive>

        Shift entire ASCII range, overflowing cases

        >>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
        <function make_shift_function.<locals>.shift_case_sensitive>

    Args:
        alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet

    Returns:
        Function (shift, symbol)
    """
    def shift_case_sensitive(shift, symbol):
        # Use the first case (sub-alphabet) that contains this symbol;
        # the shift wraps around within that case only.
        for case in alphabet:
            if symbol in case:
                position = case.index(symbol)
                return case[(position - shift) % len(case)]
        # Symbols outside every case pass through unchanged
        return symbol

    return shift_case_sensitive
shift_case_english = make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
def decrypt(key, ciphertext, shift_function=shift_case_english):
    """Decrypt Shift enciphered ``ciphertext`` using ``key``.

    Examples:
        >>> ''.join(decrypt(3, "KHOOR"))
        HELLO

        >> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes)
        [0xde, 0xad, 0xbe, 0xef]

    Args:
        key (int): The shift to use
        ciphertext (iterable): The symbols to decrypt
        shift_function (function (shift, symbol)): Shift function to apply to symbols in the ciphertext

    Returns:
        Decrypted ciphertext, list of plaintext symbols
    """
    # Apply the shift function symbol by symbol, collecting the plaintext
    plaintext_symbols = []
    for symbol in ciphertext:
        plaintext_symbols.append(shift_function(key, symbol))
    return plaintext_symbols
|
CameronLonsdale/lantern | lantern/modules/shift.py | decrypt | python | def decrypt(key, ciphertext, shift_function=shift_case_english):
return [shift_function(key, symbol) for symbol in ciphertext] | Decrypt Shift enciphered ``ciphertext`` using ``key``.
Examples:
>>> ''.join(decrypt(3, "KHOOR"))
HELLO
>> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes)
[0xde, 0xad, 0xbe, 0xef]
Args:
key (int): The shift to use
ciphertext (iterable): The symbols to decrypt
shift_function (function (shift, symbol)): Shift function to apply to symbols in the ciphertext
Returns:
Decrypted ciphertext, list of plaintext symbols | train | https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/shift.py#L83-L101 | null | """Automated breaking of the Shift Cipher."""
import string
from lantern import score
from lantern.structures import Decryption
def make_shift_function(alphabet):
"""Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
"""
def shift_case_sensitive(shift, symbol):
case = [case for case in alphabet if symbol in case]
if not case:
return symbol
case = case[0]
index = case.index(symbol)
return case[(index - shift) % len(case)]
return shift_case_sensitive
shift_case_english = make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english):
    """Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``.

    Example:
        >>> decryptions = crack("KHOOR", fitness.english.quadgrams)
        >>> print(''.join(decryptions[0].plaintext))
        HELLO

    Args:
        ciphertext (iterable): The symbols to decrypt
        *fitness_functions (variable length argument list): Functions to score decryption with

    Keyword Args:
        min_key (int): Key to start with
        max_key (int): Key to stop at (exclusive)
        shift_function (function(shift, symbol)): Shift function to use

    Returns:
        Sorted list of decryptions

    Raises:
        ValueError: If min_key exceeds max_key
        ValueError: If no fitness_functions are given
    """
    if min_key >= max_key:
        raise ValueError("min_key cannot exceed max_key")

    # Try every candidate key in [min_key, max_key) and score each decryption
    candidates = []
    key = min_key
    while key < max_key:
        plaintext = decrypt(key, ciphertext, shift_function=shift_function)
        candidates.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))
        key += 1

    # Highest-scoring decryptions first
    return sorted(candidates, reverse=True)
|
tjguk/networkzero | misc/pyconuk2017/robotics/robot/robot.py | Robot.get_command | python | def get_command(self):
try:
message_bytes = self.socket.recv(zmq.NOBLOCK)
log.debug("Received message: %r", message_bytes)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return None
else:
raise
else:
return message_bytes.decode(config.CODEC) | Attempt to return a unicode object from the command socket
If no message is available without blocking (as opposed to a blank
message), return None | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/misc/pyconuk2017/robotics/robot/robot.py#L39-L54 | null | class Robot(object):
def __init__(
self,
output,
stop_event=None,
listen_on_ip=config.LISTEN_ON_IP, listen_on_port=config.LISTEN_ON_PORT
):
log.info("Setting up Robot on %s:%s", listen_on_ip, listen_on_port)
log.info("Outputting to %s", output)
self.stop_event = stop_event or threading.Event()
self._init_socket(listen_on_ip, listen_on_port)
self.output = output
self.output._init()
def _init_socket(self, listen_on_ip, listen_on_port):
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind("tcp://%s:%s" % (listen_on_ip, listen_on_port))
def get_command(self):
"""Attempt to return a unicode object from the command socket
If no message is available without blocking (as opposed to a blank
message), return None
"""
try:
message_bytes = self.socket.recv(zmq.NOBLOCK)
log.debug("Received message: %r", message_bytes)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return None
else:
raise
else:
return message_bytes.decode(config.CODEC)
def send_response(self, response):
"""Send a unicode object as reply to the most recently-issued command
"""
response_bytes = response.encode(config.CODEC)
log.debug("About to send reponse: %r", response_bytes)
self.socket.send(response_bytes)
def parse_command(self, command):
"""Break a multi word command up into an action and its parameters
"""
words = shlex.split(command.lower())
return words[0], words[1:]
def dispatch(self, command):
"""Pass a command along with its params to a suitable handler
If the command is blank, succeed silently
If the command has no handler, succeed silently
If the handler raises an exception, fail with the exception message
"""
log.info("Dispatch on %s", command)
if not command:
return "OK"
action, params = self.parse_command(command)
log.debug("Action = %s, Params = %s", action, params)
try:
function = getattr(self, "do_" + action, None)
if function:
function(*params)
return "OK"
except KeyboardInterrupt:
raise
except Exception as exc:
log.exception("Problem executing action %s", action)
return "ERROR: %s" % exc
def do_output(self, *args):
"""Pass a command directly to the current output processor
"""
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params)
def do_finish(self):
self.stop_event.set()
#
# Main loop
#
def start(self):
while not self.stop_event.is_set():
try:
command = self.get_command()
if command is not None:
response = self.dispatch(command.strip())
self.send_response(response)
except KeyboardInterrupt:
log.warn("Closing gracefully...")
self.stop_event.set()
break
except:
log.exception("Problem in main loop")
self.stop_event.set()
raise
|
tjguk/networkzero | misc/pyconuk2017/robotics/robot/robot.py | Robot.send_response | python | def send_response(self, response):
response_bytes = response.encode(config.CODEC)
log.debug("About to send reponse: %r", response_bytes)
self.socket.send(response_bytes) | Send a unicode object as reply to the most recently-issued command | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/misc/pyconuk2017/robotics/robot/robot.py#L56-L61 | null | class Robot(object):
def __init__(
self,
output,
stop_event=None,
listen_on_ip=config.LISTEN_ON_IP, listen_on_port=config.LISTEN_ON_PORT
):
log.info("Setting up Robot on %s:%s", listen_on_ip, listen_on_port)
log.info("Outputting to %s", output)
self.stop_event = stop_event or threading.Event()
self._init_socket(listen_on_ip, listen_on_port)
self.output = output
self.output._init()
def _init_socket(self, listen_on_ip, listen_on_port):
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind("tcp://%s:%s" % (listen_on_ip, listen_on_port))
def get_command(self):
"""Attempt to return a unicode object from the command socket
If no message is available without blocking (as opposed to a blank
message), return None
"""
try:
message_bytes = self.socket.recv(zmq.NOBLOCK)
log.debug("Received message: %r", message_bytes)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return None
else:
raise
else:
return message_bytes.decode(config.CODEC)
def parse_command(self, command):
"""Break a multi word command up into an action and its parameters
"""
words = shlex.split(command.lower())
return words[0], words[1:]
def dispatch(self, command):
"""Pass a command along with its params to a suitable handler
If the command is blank, succeed silently
If the command has no handler, succeed silently
If the handler raises an exception, fail with the exception message
"""
log.info("Dispatch on %s", command)
if not command:
return "OK"
action, params = self.parse_command(command)
log.debug("Action = %s, Params = %s", action, params)
try:
function = getattr(self, "do_" + action, None)
if function:
function(*params)
return "OK"
except KeyboardInterrupt:
raise
except Exception as exc:
log.exception("Problem executing action %s", action)
return "ERROR: %s" % exc
def do_output(self, *args):
"""Pass a command directly to the current output processor
"""
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params)
def do_finish(self):
self.stop_event.set()
#
# Main loop
#
def start(self):
while not self.stop_event.is_set():
try:
command = self.get_command()
if command is not None:
response = self.dispatch(command.strip())
self.send_response(response)
except KeyboardInterrupt:
log.warn("Closing gracefully...")
self.stop_event.set()
break
except:
log.exception("Problem in main loop")
self.stop_event.set()
raise
|
tjguk/networkzero | misc/pyconuk2017/robotics/robot/robot.py | Robot.parse_command | python | def parse_command(self, command):
words = shlex.split(command.lower())
return words[0], words[1:] | Break a multi word command up into an action and its parameters | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/misc/pyconuk2017/robotics/robot/robot.py#L63-L67 | null | class Robot(object):
def __init__(
self,
output,
stop_event=None,
listen_on_ip=config.LISTEN_ON_IP, listen_on_port=config.LISTEN_ON_PORT
):
log.info("Setting up Robot on %s:%s", listen_on_ip, listen_on_port)
log.info("Outputting to %s", output)
self.stop_event = stop_event or threading.Event()
self._init_socket(listen_on_ip, listen_on_port)
self.output = output
self.output._init()
def _init_socket(self, listen_on_ip, listen_on_port):
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind("tcp://%s:%s" % (listen_on_ip, listen_on_port))
def get_command(self):
"""Attempt to return a unicode object from the command socket
If no message is available without blocking (as opposed to a blank
message), return None
"""
try:
message_bytes = self.socket.recv(zmq.NOBLOCK)
log.debug("Received message: %r", message_bytes)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return None
else:
raise
else:
return message_bytes.decode(config.CODEC)
def send_response(self, response):
"""Send a unicode object as reply to the most recently-issued command
"""
response_bytes = response.encode(config.CODEC)
log.debug("About to send reponse: %r", response_bytes)
self.socket.send(response_bytes)
def dispatch(self, command):
"""Pass a command along with its params to a suitable handler
If the command is blank, succeed silently
If the command has no handler, succeed silently
If the handler raises an exception, fail with the exception message
"""
log.info("Dispatch on %s", command)
if not command:
return "OK"
action, params = self.parse_command(command)
log.debug("Action = %s, Params = %s", action, params)
try:
function = getattr(self, "do_" + action, None)
if function:
function(*params)
return "OK"
except KeyboardInterrupt:
raise
except Exception as exc:
log.exception("Problem executing action %s", action)
return "ERROR: %s" % exc
def do_output(self, *args):
"""Pass a command directly to the current output processor
"""
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params)
def do_finish(self):
self.stop_event.set()
#
# Main loop
#
def start(self):
while not self.stop_event.is_set():
try:
command = self.get_command()
if command is not None:
response = self.dispatch(command.strip())
self.send_response(response)
except KeyboardInterrupt:
log.warn("Closing gracefully...")
self.stop_event.set()
break
except:
log.exception("Problem in main loop")
self.stop_event.set()
raise
|
tjguk/networkzero | misc/pyconuk2017/robotics/robot/robot.py | Robot.dispatch | python | def dispatch(self, command):
log.info("Dispatch on %s", command)
if not command:
return "OK"
action, params = self.parse_command(command)
log.debug("Action = %s, Params = %s", action, params)
try:
function = getattr(self, "do_" + action, None)
if function:
function(*params)
return "OK"
except KeyboardInterrupt:
raise
except Exception as exc:
log.exception("Problem executing action %s", action)
return "ERROR: %s" % exc | Pass a command along with its params to a suitable handler
If the command is blank, succeed silently
If the command has no handler, succeed silently
If the handler raises an exception, fail with the exception message | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/misc/pyconuk2017/robotics/robot/robot.py#L69-L91 | [
"def parse_command(self, command):\n \"\"\"Break a multi word command up into an action and its parameters\n \"\"\"\n words = shlex.split(command.lower())\n return words[0], words[1:]\n"
] | class Robot(object):
def __init__(
self,
output,
stop_event=None,
listen_on_ip=config.LISTEN_ON_IP, listen_on_port=config.LISTEN_ON_PORT
):
log.info("Setting up Robot on %s:%s", listen_on_ip, listen_on_port)
log.info("Outputting to %s", output)
self.stop_event = stop_event or threading.Event()
self._init_socket(listen_on_ip, listen_on_port)
self.output = output
self.output._init()
def _init_socket(self, listen_on_ip, listen_on_port):
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind("tcp://%s:%s" % (listen_on_ip, listen_on_port))
def get_command(self):
"""Attempt to return a unicode object from the command socket
If no message is available without blocking (as opposed to a blank
message), return None
"""
try:
message_bytes = self.socket.recv(zmq.NOBLOCK)
log.debug("Received message: %r", message_bytes)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return None
else:
raise
else:
return message_bytes.decode(config.CODEC)
def send_response(self, response):
"""Send a unicode object as reply to the most recently-issued command
"""
response_bytes = response.encode(config.CODEC)
log.debug("About to send reponse: %r", response_bytes)
self.socket.send(response_bytes)
def parse_command(self, command):
"""Break a multi word command up into an action and its parameters
"""
words = shlex.split(command.lower())
return words[0], words[1:]
def dispatch(self, command):
"""Pass a command along with its params to a suitable handler
If the command is blank, succeed silently
If the command has no handler, succeed silently
If the handler raises an exception, fail with the exception message
"""
log.info("Dispatch on %s", command)
if not command:
return "OK"
action, params = self.parse_command(command)
log.debug("Action = %s, Params = %s", action, params)
try:
function = getattr(self, "do_" + action, None)
if function:
function(*params)
return "OK"
except KeyboardInterrupt:
raise
except Exception as exc:
log.exception("Problem executing action %s", action)
return "ERROR: %s" % exc
def do_output(self, *args):
"""Pass a command directly to the current output processor
"""
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params)
def do_finish(self):
self.stop_event.set()
#
# Main loop
#
def start(self):
while not self.stop_event.is_set():
try:
command = self.get_command()
if command is not None:
response = self.dispatch(command.strip())
self.send_response(response)
except KeyboardInterrupt:
log.warn("Closing gracefully...")
self.stop_event.set()
break
except:
log.exception("Problem in main loop")
self.stop_event.set()
raise
|
tjguk/networkzero | misc/pyconuk2017/robotics/robot/robot.py | Robot.do_output | python | def do_output(self, *args):
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params) | Pass a command directly to the current output processor | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/misc/pyconuk2017/robotics/robot/robot.py#L93-L101 | null | class Robot(object):
def __init__(
self,
output,
stop_event=None,
listen_on_ip=config.LISTEN_ON_IP, listen_on_port=config.LISTEN_ON_PORT
):
log.info("Setting up Robot on %s:%s", listen_on_ip, listen_on_port)
log.info("Outputting to %s", output)
self.stop_event = stop_event or threading.Event()
self._init_socket(listen_on_ip, listen_on_port)
self.output = output
self.output._init()
def _init_socket(self, listen_on_ip, listen_on_port):
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind("tcp://%s:%s" % (listen_on_ip, listen_on_port))
def get_command(self):
"""Attempt to return a unicode object from the command socket
If no message is available without blocking (as opposed to a blank
message), return None
"""
try:
message_bytes = self.socket.recv(zmq.NOBLOCK)
log.debug("Received message: %r", message_bytes)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return None
else:
raise
else:
return message_bytes.decode(config.CODEC)
def send_response(self, response):
"""Send a unicode object as reply to the most recently-issued command
"""
response_bytes = response.encode(config.CODEC)
log.debug("About to send reponse: %r", response_bytes)
self.socket.send(response_bytes)
def parse_command(self, command):
"""Break a multi word command up into an action and its parameters
"""
words = shlex.split(command.lower())
return words[0], words[1:]
def dispatch(self, command):
"""Pass a command along with its params to a suitable handler
If the command is blank, succeed silently
If the command has no handler, succeed silently
If the handler raises an exception, fail with the exception message
"""
log.info("Dispatch on %s", command)
if not command:
return "OK"
action, params = self.parse_command(command)
log.debug("Action = %s, Params = %s", action, params)
try:
function = getattr(self, "do_" + action, None)
if function:
function(*params)
return "OK"
except KeyboardInterrupt:
raise
except Exception as exc:
log.exception("Problem executing action %s", action)
return "ERROR: %s" % exc
def do_finish(self):
self.stop_event.set()
#
# Main loop
#
def start(self):
while not self.stop_event.is_set():
try:
command = self.get_command()
if command is not None:
response = self.dispatch(command.strip())
self.send_response(response)
except KeyboardInterrupt:
log.warn("Closing gracefully...")
self.stop_event.set()
break
except:
log.exception("Problem in main loop")
self.stop_event.set()
raise
|
tjguk/networkzero | networkzero/sockets.py | Sockets.get_socket | python | def get_socket(self, address, role):
#
# If this thread doesn't yet have a sockets cache
# in its local storage, create one here.
#
try:
self._tls.sockets
except AttributeError:
self._tls.sockets = {}
# Convert the address to a single canonical string.
#
# If a list of addresses is passed, turn it into a tuple
# of canonical addresses for use as a dictionary key.
# Otherwise convert it to a single canonical string.
#
if isinstance(address, list):
caddress = tuple(core.address(a) for a in address)
else:
caddress = core.address(address)
#
# Each socket is identified for this thread by its address(es)
# and the role the socket is playing (listener, publisher, etc.)
# That is, within one thread, we are cacheing a read or a write
# socket to the same address(es).
#
# The slight corner case from this is that if you attempt to
# send to [addressA, addressB] and then to addressA and then
# to [addressB, addressA], three separate sockets will be
# created and used.
#
identifier = caddress
if identifier not in self._tls.sockets:
_logger.debug("%s does not exist in local sockets", identifier)
#
# If this is a listening / subscribing socket, it can only
# be bound once, regardless of thread. Therefore keep a
# threads-global list of addresses used and make sure this
# one hasn't been used elsewhere.
#
if role in Socket.binding_roles:
with self._lock:
if identifier in self._sockets:
raise core.SocketAlreadyExistsError("You cannot create a listening socket in more than one thread")
else:
self._sockets.add(identifier)
type = self.roles[role]
socket = context.socket(type)
socket.role = role
socket.address = caddress
#
# Do this last so that an exception earlier will result
# in the socket not being cached
#
self._tls.sockets[identifier] = socket
else:
_logger.debug("%s already not exist in local sockets", identifier)
#
# Only return sockets created in this thread
#
socket = self._tls.sockets[identifier]
return socket | Create or retrieve a socket of the right type, already connected
to the address. Address (ip:port) must be fully specified at this
point. core.address can be used to generate an address. | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/sockets.py#L119-L188 | [
"def address(address=None):\n \"\"\"Convert one of a number of inputs into a valid ip:port string.\n\n Elements which are not provided are filled in as follows:\n\n * IP Address: the system is asked for the set of IP addresses associated\n with the machine and the first one is used, preferring... | class Sockets:
try_length_ms = 500 # wait for .5 second at a time
roles = {
"listener" : zmq.REP,
"speaker" : zmq.REQ,
"publisher" : zmq.PUB,
"subscriber" : zmq.SUB
}
def __init__(self):
self._tls = threading.local()
self._lock = threading.Lock()
with self._lock:
self._sockets = set()
def get_socket(self, address, role):
"""Create or retrieve a socket of the right type, already connected
to the address. Address (ip:port) must be fully specified at this
point. core.address can be used to generate an address.
"""
#
# If this thread doesn't yet have a sockets cache
# in its local storage, create one here.
#
try:
self._tls.sockets
except AttributeError:
self._tls.sockets = {}
# Convert the address to a single canonical string.
#
# If a list of addresses is passed, turn it into a tuple
# of canonical addresses for use as a dictionary key.
# Otherwise convert it to a single canonical string.
#
if isinstance(address, list):
caddress = tuple(core.address(a) for a in address)
else:
caddress = core.address(address)
#
# Each socket is identified for this thread by its address(es)
# and the role the socket is playing (listener, publisher, etc.)
# That is, within one thread, we are cacheing a read or a write
# socket to the same address(es).
#
# The slight corner case from this is that if you attempt to
# send to [addressA, addressB] and then to addressA and then
# to [addressB, addressA], three separate sockets will be
# created and used.
#
identifier = caddress
if identifier not in self._tls.sockets:
_logger.debug("%s does not exist in local sockets", identifier)
#
# If this is a listening / subscribing socket, it can only
# be bound once, regardless of thread. Therefore keep a
# threads-global list of addresses used and make sure this
# one hasn't been used elsewhere.
#
if role in Socket.binding_roles:
with self._lock:
if identifier in self._sockets:
raise core.SocketAlreadyExistsError("You cannot create a listening socket in more than one thread")
else:
self._sockets.add(identifier)
type = self.roles[role]
socket = context.socket(type)
socket.role = role
socket.address = caddress
#
# Do this last so that an exception earlier will result
# in the socket not being cached
#
self._tls.sockets[identifier] = socket
else:
_logger.debug("%s already not exist in local sockets", identifier)
#
# Only return sockets created in this thread
#
socket = self._tls.sockets[identifier]
return socket
def intervals_ms(self, timeout_ms):
"""Generate a series of interval lengths, in ms, which
will add up to the number of ms in timeout_ms. If timeout_ms
is None, keep returning intervals forever.
"""
if timeout_ms is config.FOREVER:
while True:
yield self.try_length_ms
else:
whole_intervals, part_interval = divmod(timeout_ms, self.try_length_ms)
for _ in range(whole_intervals):
yield self.try_length_ms
yield part_interval
def _receive_with_timeout(self, socket, timeout_s, use_multipart=False):
"""Check for socket activity and either return what's
received on the socket or time out if timeout_s expires
without anything on the socket.
This is implemented in loops of self.try_length_ms milliseconds
to allow Ctrl-C handling to take place.
"""
if timeout_s is config.FOREVER:
timeout_ms = config.FOREVER
else:
timeout_ms = int(1000 * timeout_s)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
ms_so_far = 0
try:
for interval_ms in self.intervals_ms(timeout_ms):
sockets = dict(poller.poll(interval_ms))
ms_so_far += interval_ms
if socket in sockets:
if use_multipart:
return socket.recv_multipart()
else:
return socket.recv()
else:
raise core.SocketTimedOutError(timeout_s)
except KeyboardInterrupt:
raise core.SocketInterruptedError(ms_so_far / 1000.0)
def wait_for_message_from(self, address, wait_for_s):
socket = self.get_socket(address, "listener")
try:
message = self._receive_with_timeout(socket, wait_for_s)
except (core.SocketTimedOutError):
return None
else:
return _unserialise(message)
def send_message_to(self, address, message, wait_for_reply_s):
socket = self.get_socket(address, "speaker")
serialised_message = _serialise(message)
socket.send(serialised_message)
return _unserialise(self._receive_with_timeout(socket, wait_for_reply_s))
def send_reply_to(self, address, reply):
socket = self.get_socket(address, "listener")
reply = _serialise(reply)
return socket.send(reply)
def send_news_to(self, address, topic, data):
socket = self.get_socket(address, "publisher")
return socket.send_multipart(_serialise_for_pubsub(topic, data))
def wait_for_news_from(self, address, topic, wait_for_s, is_raw=False):
if isinstance(address, list):
addresses = address
else:
addresses = [address]
socket = self.get_socket(addresses, "subscriber")
if isinstance(topic, str):
topics = [topic]
else:
topics = topic
for t in topics:
socket.set(zmq.SUBSCRIBE, t.encode(config.ENCODING))
try:
result = self._receive_with_timeout(socket, wait_for_s, use_multipart=True)
unserialised_result = _unserialise_for_pubsub(result, is_raw)
return unserialised_result
except (core.SocketTimedOutError, core.SocketInterruptedError):
return None, None
|
tjguk/networkzero | networkzero/sockets.py | Sockets.intervals_ms | python | def intervals_ms(self, timeout_ms):
if timeout_ms is config.FOREVER:
while True:
yield self.try_length_ms
else:
whole_intervals, part_interval = divmod(timeout_ms, self.try_length_ms)
for _ in range(whole_intervals):
yield self.try_length_ms
yield part_interval | Generate a series of interval lengths, in ms, which
will add up to the number of ms in timeout_ms. If timeout_ms
is None, keep returning intervals forever. | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/sockets.py#L190-L202 | null | class Sockets:
try_length_ms = 500 # wait for .5 second at a time
roles = {
"listener" : zmq.REP,
"speaker" : zmq.REQ,
"publisher" : zmq.PUB,
"subscriber" : zmq.SUB
}
def __init__(self):
self._tls = threading.local()
self._lock = threading.Lock()
with self._lock:
self._sockets = set()
def get_socket(self, address, role):
"""Create or retrieve a socket of the right type, already connected
to the address. Address (ip:port) must be fully specified at this
point. core.address can be used to generate an address.
"""
#
# If this thread doesn't yet have a sockets cache
# in its local storage, create one here.
#
try:
self._tls.sockets
except AttributeError:
self._tls.sockets = {}
# Convert the address to a single canonical string.
#
# If a list of addresses is passed, turn it into a tuple
# of canonical addresses for use as a dictionary key.
# Otherwise convert it to a single canonical string.
#
if isinstance(address, list):
caddress = tuple(core.address(a) for a in address)
else:
caddress = core.address(address)
#
# Each socket is identified for this thread by its address(es)
# and the role the socket is playing (listener, publisher, etc.)
# That is, within one thread, we are cacheing a read or a write
# socket to the same address(es).
#
# The slight corner case from this is that if you attempt to
# send to [addressA, addressB] and then to addressA and then
# to [addressB, addressA], three separate sockets will be
# created and used.
#
identifier = caddress
if identifier not in self._tls.sockets:
_logger.debug("%s does not exist in local sockets", identifier)
#
# If this is a listening / subscribing socket, it can only
# be bound once, regardless of thread. Therefore keep a
# threads-global list of addresses used and make sure this
# one hasn't been used elsewhere.
#
if role in Socket.binding_roles:
with self._lock:
if identifier in self._sockets:
raise core.SocketAlreadyExistsError("You cannot create a listening socket in more than one thread")
else:
self._sockets.add(identifier)
type = self.roles[role]
socket = context.socket(type)
socket.role = role
socket.address = caddress
#
# Do this last so that an exception earlier will result
# in the socket not being cached
#
self._tls.sockets[identifier] = socket
else:
_logger.debug("%s already not exist in local sockets", identifier)
#
# Only return sockets created in this thread
#
socket = self._tls.sockets[identifier]
return socket
def _receive_with_timeout(self, socket, timeout_s, use_multipart=False):
"""Check for socket activity and either return what's
received on the socket or time out if timeout_s expires
without anything on the socket.
This is implemented in loops of self.try_length_ms milliseconds
to allow Ctrl-C handling to take place.
"""
if timeout_s is config.FOREVER:
timeout_ms = config.FOREVER
else:
timeout_ms = int(1000 * timeout_s)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
ms_so_far = 0
try:
for interval_ms in self.intervals_ms(timeout_ms):
sockets = dict(poller.poll(interval_ms))
ms_so_far += interval_ms
if socket in sockets:
if use_multipart:
return socket.recv_multipart()
else:
return socket.recv()
else:
raise core.SocketTimedOutError(timeout_s)
except KeyboardInterrupt:
raise core.SocketInterruptedError(ms_so_far / 1000.0)
def wait_for_message_from(self, address, wait_for_s):
socket = self.get_socket(address, "listener")
try:
message = self._receive_with_timeout(socket, wait_for_s)
except (core.SocketTimedOutError):
return None
else:
return _unserialise(message)
def send_message_to(self, address, message, wait_for_reply_s):
socket = self.get_socket(address, "speaker")
serialised_message = _serialise(message)
socket.send(serialised_message)
return _unserialise(self._receive_with_timeout(socket, wait_for_reply_s))
def send_reply_to(self, address, reply):
socket = self.get_socket(address, "listener")
reply = _serialise(reply)
return socket.send(reply)
def send_news_to(self, address, topic, data):
socket = self.get_socket(address, "publisher")
return socket.send_multipart(_serialise_for_pubsub(topic, data))
def wait_for_news_from(self, address, topic, wait_for_s, is_raw=False):
if isinstance(address, list):
addresses = address
else:
addresses = [address]
socket = self.get_socket(addresses, "subscriber")
if isinstance(topic, str):
topics = [topic]
else:
topics = topic
for t in topics:
socket.set(zmq.SUBSCRIBE, t.encode(config.ENCODING))
try:
result = self._receive_with_timeout(socket, wait_for_s, use_multipart=True)
unserialised_result = _unserialise_for_pubsub(result, is_raw)
return unserialised_result
except (core.SocketTimedOutError, core.SocketInterruptedError):
return None, None
|
tjguk/networkzero | networkzero/sockets.py | Sockets._receive_with_timeout | python | def _receive_with_timeout(self, socket, timeout_s, use_multipart=False):
if timeout_s is config.FOREVER:
timeout_ms = config.FOREVER
else:
timeout_ms = int(1000 * timeout_s)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
ms_so_far = 0
try:
for interval_ms in self.intervals_ms(timeout_ms):
sockets = dict(poller.poll(interval_ms))
ms_so_far += interval_ms
if socket in sockets:
if use_multipart:
return socket.recv_multipart()
else:
return socket.recv()
else:
raise core.SocketTimedOutError(timeout_s)
except KeyboardInterrupt:
raise core.SocketInterruptedError(ms_so_far / 1000.0) | Check for socket activity and either return what's
received on the socket or time out if timeout_s expires
without anything on the socket.
This is implemented in loops of self.try_length_ms milliseconds
to allow Ctrl-C handling to take place. | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/sockets.py#L204-L232 | [
"def intervals_ms(self, timeout_ms):\n \"\"\"Generate a series of interval lengths, in ms, which\n will add up to the number of ms in timeout_ms. If timeout_ms\n is None, keep returning intervals forever.\n \"\"\"\n if timeout_ms is config.FOREVER:\n while True:\n yield self.try_len... | class Sockets:
try_length_ms = 500 # wait for .5 second at a time
roles = {
"listener" : zmq.REP,
"speaker" : zmq.REQ,
"publisher" : zmq.PUB,
"subscriber" : zmq.SUB
}
def __init__(self):
self._tls = threading.local()
self._lock = threading.Lock()
with self._lock:
self._sockets = set()
def get_socket(self, address, role):
"""Create or retrieve a socket of the right type, already connected
to the address. Address (ip:port) must be fully specified at this
point. core.address can be used to generate an address.
"""
#
# If this thread doesn't yet have a sockets cache
# in its local storage, create one here.
#
try:
self._tls.sockets
except AttributeError:
self._tls.sockets = {}
# Convert the address to a single canonical string.
#
# If a list of addresses is passed, turn it into a tuple
# of canonical addresses for use as a dictionary key.
# Otherwise convert it to a single canonical string.
#
if isinstance(address, list):
caddress = tuple(core.address(a) for a in address)
else:
caddress = core.address(address)
#
# Each socket is identified for this thread by its address(es)
# and the role the socket is playing (listener, publisher, etc.)
# That is, within one thread, we are cacheing a read or a write
# socket to the same address(es).
#
# The slight corner case from this is that if you attempt to
# send to [addressA, addressB] and then to addressA and then
# to [addressB, addressA], three separate sockets will be
# created and used.
#
identifier = caddress
if identifier not in self._tls.sockets:
_logger.debug("%s does not exist in local sockets", identifier)
#
# If this is a listening / subscribing socket, it can only
# be bound once, regardless of thread. Therefore keep a
# threads-global list of addresses used and make sure this
# one hasn't been used elsewhere.
#
if role in Socket.binding_roles:
with self._lock:
if identifier in self._sockets:
raise core.SocketAlreadyExistsError("You cannot create a listening socket in more than one thread")
else:
self._sockets.add(identifier)
type = self.roles[role]
socket = context.socket(type)
socket.role = role
socket.address = caddress
#
# Do this last so that an exception earlier will result
# in the socket not being cached
#
self._tls.sockets[identifier] = socket
else:
_logger.debug("%s already not exist in local sockets", identifier)
#
# Only return sockets created in this thread
#
socket = self._tls.sockets[identifier]
return socket
def intervals_ms(self, timeout_ms):
"""Generate a series of interval lengths, in ms, which
will add up to the number of ms in timeout_ms. If timeout_ms
is None, keep returning intervals forever.
"""
if timeout_ms is config.FOREVER:
while True:
yield self.try_length_ms
else:
whole_intervals, part_interval = divmod(timeout_ms, self.try_length_ms)
for _ in range(whole_intervals):
yield self.try_length_ms
yield part_interval
def _receive_with_timeout(self, socket, timeout_s, use_multipart=False):
"""Check for socket activity and either return what's
received on the socket or time out if timeout_s expires
without anything on the socket.
This is implemented in loops of self.try_length_ms milliseconds
to allow Ctrl-C handling to take place.
"""
if timeout_s is config.FOREVER:
timeout_ms = config.FOREVER
else:
timeout_ms = int(1000 * timeout_s)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
ms_so_far = 0
try:
for interval_ms in self.intervals_ms(timeout_ms):
sockets = dict(poller.poll(interval_ms))
ms_so_far += interval_ms
if socket in sockets:
if use_multipart:
return socket.recv_multipart()
else:
return socket.recv()
else:
raise core.SocketTimedOutError(timeout_s)
except KeyboardInterrupt:
raise core.SocketInterruptedError(ms_so_far / 1000.0)
def wait_for_message_from(self, address, wait_for_s):
socket = self.get_socket(address, "listener")
try:
message = self._receive_with_timeout(socket, wait_for_s)
except (core.SocketTimedOutError):
return None
else:
return _unserialise(message)
def send_message_to(self, address, message, wait_for_reply_s):
socket = self.get_socket(address, "speaker")
serialised_message = _serialise(message)
socket.send(serialised_message)
return _unserialise(self._receive_with_timeout(socket, wait_for_reply_s))
def send_reply_to(self, address, reply):
socket = self.get_socket(address, "listener")
reply = _serialise(reply)
return socket.send(reply)
def send_news_to(self, address, topic, data):
socket = self.get_socket(address, "publisher")
return socket.send_multipart(_serialise_for_pubsub(topic, data))
def wait_for_news_from(self, address, topic, wait_for_s, is_raw=False):
if isinstance(address, list):
addresses = address
else:
addresses = [address]
socket = self.get_socket(addresses, "subscriber")
if isinstance(topic, str):
topics = [topic]
else:
topics = topic
for t in topics:
socket.set(zmq.SUBSCRIBE, t.encode(config.ENCODING))
try:
result = self._receive_with_timeout(socket, wait_for_s, use_multipart=True)
unserialised_result = _unserialise_for_pubsub(result, is_raw)
return unserialised_result
except (core.SocketTimedOutError, core.SocketInterruptedError):
return None, None
|
tjguk/networkzero | networkzero/core.py | is_valid_ip_pattern | python | def is_valid_ip_pattern(ip):
ip = ip.replace('*', '1')
try:
socket.inet_aton(ip)
return True
except socket.error:
# Not a valid IPv4 address pattern
return False | Check whether a string matches the outline of an IPv4 address,
allowing "*" as a wildcard | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/core.py#L101-L110 | null | # -*- coding: utf-8 -*-
import base64
import fnmatch
import logging
import random
import shlex
import socket
try:
import netifaces
except ImportError:
warnings.warn("Unable to import netifaces; using local fallback")
from . import _netifaces as netifaces
from . import config
def get_logger(name):
#
# For now, this is just a hand-off to logging.getLogger
# Later, though, we might want to add a null handler etc.
#
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
return logger
_debug_logging_enabled = False
def _enable_debug_logging():
global _debug_logging_enabled
if not _debug_logging_enabled:
logger = logging.getLogger("networkzero")
handler = logging.FileHandler("network.log", "w", encoding="utf-8")
handler.setFormatter(logging.Formatter("%(asctime)s %(threadName)s %(name)s %(levelname)s %(message)s"))
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
_debug_logging_enabled = True
_logger = get_logger(__name__)
#
# Common exceptions
#
class NetworkZeroError(Exception):
pass
class SocketAlreadyExistsError(NetworkZeroError):
pass
class SocketTimedOutError(NetworkZeroError):
def __init__(self, n_seconds):
self.n_seconds = n_seconds
def __str__(self):
return "Gave up waiting after %s seconds; this connection is now unusable" % self.n_seconds
class SocketInterruptedError(NetworkZeroError):
def __init__(self, after_n_seconds):
self.after_n_seconds = after_n_seconds
def __str__(self):
return "Interrupted after %s seconds; this connection is now unusable" % self.after_n_seconds
class AddressError(NetworkZeroError):
pass
class NoAddressFoundError(AddressError):
pass
class InvalidAddressError(NetworkZeroError):
def __init__(self, address, errno=None):
self.address = address
self.errno = errno
def __str__(self):
message = "%s is not a valid address" % self.address
if self.errno:
message += "; the system returned an error of %d" % self.errno
return message
class DifferentThreadError(NetworkZeroError):
pass
#
# Ports in the range 0xc000..0xffff are reserved
# for dynamic allocation
#
PORT_POOL = list(config.DYNAMIC_PORTS)
def split_address(address):
if ":" in address:
ip, _, port = address.partition(":")
else:
if address.isdigit():
ip, port = "", address
else:
ip, port = address, ""
return ip, port
def is_valid_port(port, port_range=range(65536)):
try:
return int(port) in port_range
except ValueError:
return False
def is_valid_address(address, port_range=range(65536)):
ip, port = split_address(address)
return is_valid_ip_pattern(ip) and is_valid_port(port, port_range)
def _find_ip4_broadcast_addresses():
"""Yield each IP4 broadcast address, and the all-broadcast
"""
yield "255.255.255.255"
for interface in netifaces.interfaces():
ifaddresses = netifaces.ifaddresses(interface)
for family in ifaddresses:
if family == netifaces.AF_INET:
address_info = ifaddresses[family]
for info in address_info:
if "broadcast" in info:
yield info['broadcast']
_ip4_addresses = None
def _find_ip4_addresses():
"""Find all the IP4 addresses currently bound to interfaces
"""
global _ip4_addresses
proto = socket.AF_INET
if _ip4_addresses is None:
_ip4_addresses = []
#
# Determine the interface for the default gateway
# (if any) and, later, prioritise the INET address on
# that interface.
#
default_gateway = netifaces.gateways()['default']
if proto in default_gateway:
_, default_gateway_interface = default_gateway[proto]
else:
default_gateway_interface = None
for interface in netifaces.interfaces():
for info in netifaces.ifaddresses(interface).get(netifaces.AF_INET, []):
if info['addr']:
if interface == default_gateway_interface:
_ip4_addresses.insert(0, info['addr'])
else:
_ip4_addresses.append(info['addr'])
return _ip4_addresses
_ip4 = None
_prefer = None
def _find_ip4(prefer=None):
global _ip4, _prefer
#
# Order the list of possible addresses on the machine: if any
# address pattern is given as a preference (most -> least)
# give it that weighting, otherwise treat all addresses
# numerically. If no preference is given, prefer the most
# likely useful local address range.
#
if prefer:
_prefer = prefer
else:
_prefer = ["192.168.*"]
def sorter(ip4):
octets = [int(i) for i in ip4.split(".")]
for n, pattern in enumerate(_prefer):
if fnmatch.fnmatch(ip4, pattern):
return n, octets
else:
#
# Return the address itself if it doesn't match
# a preference
#
return n + 1, octets
ip4_addresses = _find_ip4_addresses()
#
# Pick an address allowing for user preference if stated
#
if not ip4_addresses:
raise NoAddressFoundError
else:
#
# Find the best match. If the user actually supplied a preference
# list, assume an exact match is required to at least one of the
# patterns.
#
ip4 = min(ip4_addresses, key=sorter)
if prefer and not any(fnmatch.fnmatch(ip4, pattern) for pattern in prefer):
raise NoAddressFoundError("No address matches any of: %s" % ", ".join(prefer))
else:
_ip4 = ip4
return _ip4
def address(address=None):
"""Convert one of a number of inputs into a valid ip:port string.
Elements which are not provided are filled in as follows:
* IP Address: the system is asked for the set of IP addresses associated
with the machine and the first one is used, preferring those matching
`address` if it is a wildcard.
* Port number: a random port is selected from the pool of dynamically-available
port numbers.
This means you can pass any of: nothing; a hostname; an IP address; an IP address with wildcards; a port number
If an IP address is supplied but is invalid, an InvalidAddressError
exception is raised.
:param address: (optional) Any of: an IP address, a port number, or both
:returns: a valid ip:port string for this machine
"""
address = str(address or "").strip()
#
# If the address is an ip:port pair, split into its component parts.
# Otherwise, try to determine whether we're looking at an IP
# or at a port and leave the other one blank
#
host_or_ip, port = split_address(address)
#
# If the port has been supplied, make sure it's numeric and that it's a valid
# port number. If it hasn't been supplied, remove a random one from the pool
# of possible dynamically-allocated ports and use that.
#
if port:
try:
port = int(port)
except ValueError:
raise AddressError("Port %s must be a number" % port)
if port not in config.VALID_PORTS:
raise AddressError("Port %d must be in range %d - %d" % (
port, min(config.VALID_PORTS), max(config.VALID_PORTS))
)
else:
random.shuffle(PORT_POOL)
port = PORT_POOL.pop()
#
# The address part could be an IP address (optionally including
# wildcards to indicate a preference) or a hostname or nothing.
# If it's a hostname we attempt to resolve it to an IP address.
# It it's nothing or a wildcard we query the system for a matching IP address.
#
if (not host_or_ip) or is_valid_ip_pattern(host_or_ip):
#
# If a specific IP address is given, use that.
# If an IP pattern is given (ie something with a wildcard in it) treat
# that as no address with a preference for that wildcard.
#
prefer = None
if "*" in host_or_ip:
host_or_ip, prefer = None, [host_or_ip]
#
# If no IP (or only a wildcard) is specified, query the system for valid
# addresses, preferring those which match the wildcard. NB if the preference
# matches one we've previously used, we can return a cached address. But
# different requests can specify different wildcard preferences.
#
if not host_or_ip:
if _ip4 and _prefer == prefer:
ip = _ip4
else:
ip = _find_ip4(prefer)
else:
ip = host_or_ip
else:
#
# Treat the string as a hostname and resolve to an IP4 address
#
try:
ip = socket.gethostbyname(host_or_ip)
except socket.gaierror as exc:
_logger.error("gaierror %d for %s", exc.errno, host_or_ip)
raise InvalidAddressError(host_or_ip, exc.errno)
else:
#
# Bizarrely specific check because BT Internet "helpfully"
# redirects DNS fails to this address which hosts a sponsored
# landing page!
#
if ip == "92.242.132.15":
raise InvalidAddressError(host_or_ip, 0)
return "%s:%s" % (ip, port)
def action_and_params(commandline):
"""Treat a command line as an action followed by parameter
:param commandline: a string containing at least an action
:returns: action, [param1, param2, ...]
"""
components = shlex.split(commandline)
return components[0], components[1:]
def bytes_to_string(data):
"""Take bytes and return a base64-encoded unicode string equivalent
:param data: a bytes object
:returns: base64-encoded unicode object
"""
return base64.b64encode(data).decode("ascii")
def string_to_bytes(data):
"""Take a base64-encoded unicode string and return the equivalent bytes
:param data: a base64-encoded unicode object
:returns: the equivalent bytes
"""
return base64.b64decode(data.encode("ascii"))
|
tjguk/networkzero | networkzero/core.py | _find_ip4_broadcast_addresses | python | def _find_ip4_broadcast_addresses():
yield "255.255.255.255"
for interface in netifaces.interfaces():
ifaddresses = netifaces.ifaddresses(interface)
for family in ifaddresses:
if family == netifaces.AF_INET:
address_info = ifaddresses[family]
for info in address_info:
if "broadcast" in info:
yield info['broadcast'] | Yield each IP4 broadcast address, and the all-broadcast | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/core.py#L122-L133 | null | # -*- coding: utf-8 -*-
import base64
import fnmatch
import logging
import random
import shlex
import socket
try:
import netifaces
except ImportError:
warnings.warn("Unable to import netifaces; using local fallback")
from . import _netifaces as netifaces
from . import config
def get_logger(name):
#
# For now, this is just a hand-off to logging.getLogger
# Later, though, we might want to add a null handler etc.
#
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
return logger
_debug_logging_enabled = False
def _enable_debug_logging():
global _debug_logging_enabled
if not _debug_logging_enabled:
logger = logging.getLogger("networkzero")
handler = logging.FileHandler("network.log", "w", encoding="utf-8")
handler.setFormatter(logging.Formatter("%(asctime)s %(threadName)s %(name)s %(levelname)s %(message)s"))
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
_debug_logging_enabled = True
_logger = get_logger(__name__)
#
# Common exceptions
#
class NetworkZeroError(Exception):
pass
class SocketAlreadyExistsError(NetworkZeroError):
pass
class SocketTimedOutError(NetworkZeroError):
def __init__(self, n_seconds):
self.n_seconds = n_seconds
def __str__(self):
return "Gave up waiting after %s seconds; this connection is now unusable" % self.n_seconds
class SocketInterruptedError(NetworkZeroError):
def __init__(self, after_n_seconds):
self.after_n_seconds = after_n_seconds
def __str__(self):
return "Interrupted after %s seconds; this connection is now unusable" % self.after_n_seconds
class AddressError(NetworkZeroError):
pass
class NoAddressFoundError(AddressError):
pass
class InvalidAddressError(NetworkZeroError):
def __init__(self, address, errno=None):
self.address = address
self.errno = errno
def __str__(self):
message = "%s is not a valid address" % self.address
if self.errno:
message += "; the system returned an error of %d" % self.errno
return message
class DifferentThreadError(NetworkZeroError):
pass
#
# Ports in the range 0xc000..0xffff are reserved
# for dynamic allocation
#
PORT_POOL = list(config.DYNAMIC_PORTS)
def split_address(address):
if ":" in address:
ip, _, port = address.partition(":")
else:
if address.isdigit():
ip, port = "", address
else:
ip, port = address, ""
return ip, port
def is_valid_ip_pattern(ip):
"""Check whether a string matches the outline of an IPv4 address,
allowing "*" as a wildcard"""
ip = ip.replace('*', '1')
try:
socket.inet_aton(ip)
return True
except socket.error:
# Not a valid IPv4 address pattern
return False
def is_valid_port(port, port_range=range(65536)):
try:
return int(port) in port_range
except ValueError:
return False
def is_valid_address(address, port_range=range(65536)):
    """Return True if `address` is a plausible "ip:port" string whose
    port falls inside `port_range`."""
    host, port = split_address(address)
    return is_valid_ip_pattern(host) and is_valid_port(port, port_range)
_ip4_addresses = None
def _find_ip4_addresses():
    """Find all the IP4 addresses currently bound to interfaces

    The result is computed once and cached in the module-level
    `_ip4_addresses` list; later calls return the cached list even if
    the machine's interfaces have since changed.
    """
    global _ip4_addresses
    proto = socket.AF_INET
    if _ip4_addresses is None:
        _ip4_addresses = []
        #
        # Determine the interface for the default gateway
        # (if any) and, later, prioritise the INET address on
        # that interface.
        #
        # NOTE(review): assumes netifaces.gateways() always has a
        # 'default' key -- true for the bundled fallback; confirm for
        # the installed netifaces version.
        #
        default_gateway = netifaces.gateways()['default']
        if proto in default_gateway:
            _, default_gateway_interface = default_gateway[proto]
        else:
            default_gateway_interface = None
        for interface in netifaces.interfaces():
            for info in netifaces.ifaddresses(interface).get(netifaces.AF_INET, []):
                if info['addr']:
                    # Addresses on the default-gateway interface go to the
                    # front so callers that take the first entry prefer them
                    if interface == default_gateway_interface:
                        _ip4_addresses.insert(0, info['addr'])
                    else:
                        _ip4_addresses.append(info['addr'])
    return _ip4_addresses
_ip4 = None
_prefer = None
def _find_ip4(prefer=None):
    """Choose one IP4 address for this machine and cache it in `_ip4`.

    `prefer` is an optional list of fnmatch-style wildcard patterns in
    most-to-least preferred order; if omitted, the common private
    192.168.* range is preferred.

    Raises NoAddressFoundError if no address is bound at all, or if
    `prefer` was explicitly given and no bound address matches it.
    """
    global _ip4, _prefer
    #
    # Order the list of possible addresses on the machine: if any
    # address pattern is given as a preference (most -> least)
    # give it that weighting, otherwise treat all addresses
    # numerically. If no preference is given, prefer the most
    # likely useful local address range.
    #
    if prefer:
        _prefer = prefer
    else:
        _prefer = ["192.168.*"]
    def sorter(ip4):
        # Sort key: (index of first matching preference pattern, octets).
        # NB the `else` below belongs to the `for`: it runs only when no
        # pattern matched, ranking the address after every preference
        # (n + 1) with numeric octet order as the tie-breaker.
        octets = [int(i) for i in ip4.split(".")]
        for n, pattern in enumerate(_prefer):
            if fnmatch.fnmatch(ip4, pattern):
                return n, octets
        else:
            #
            # Return the address itself if it doesn't match
            # a preference
            #
            return n + 1, octets
    ip4_addresses = _find_ip4_addresses()
    #
    # Pick an address allowing for user preference if stated
    #
    if not ip4_addresses:
        raise NoAddressFoundError
    else:
        #
        # Find the best match. If the user actually supplied a preference
        # list, assume an exact match is required to at least one of the
        # patterns.
        #
        ip4 = min(ip4_addresses, key=sorter)
        if prefer and not any(fnmatch.fnmatch(ip4, pattern) for pattern in prefer):
            raise NoAddressFoundError("No address matches any of: %s" % ", ".join(prefer))
        else:
            _ip4 = ip4
            return _ip4
def address(address=None):
    """Convert one of a number of inputs into a valid ip:port string.

    Elements which are not provided are filled in as follows:

    * IP Address: the system is asked for the set of IP addresses associated
      with the machine and the first one is used, preferring those matching
      `address` if it is a wildcard.
    * Port number: a random port is selected from the pool of dynamically-available
      port numbers.

    This means you can pass any of: nothing; a hostname; an IP address;
    an IP address with wildcards; a port number

    If an IP address is supplied but is invalid, an InvalidAddressError
    exception is raised.

    :param address: (optional) Any of: an IP address, a port number, or both
    :returns: a valid ip:port string for this machine
    :raises AddressError: if the port is non-numeric or out of range
    :raises InvalidAddressError: if a hostname cannot be resolved
    """
    address = str(address or "").strip()
    #
    # If the address is an ip:port pair, split into its component parts.
    # Otherwise, try to determine whether we're looking at an IP
    # or at a port and leave the other one blank
    #
    host_or_ip, port = split_address(address)
    #
    # If the port has been supplied, make sure it's numeric and that it's a valid
    # port number. If it hasn't been supplied, remove a random one from the pool
    # of possible dynamically-allocated ports and use that.
    #
    if port:
        try:
            port = int(port)
        except ValueError:
            raise AddressError("Port %s must be a number" % port)
        if port not in config.VALID_PORTS:
            raise AddressError("Port %d must be in range %d - %d" % (
                port, min(config.VALID_PORTS), max(config.VALID_PORTS))
            )
    else:
        # NB this shuffles the shared module-level pool and permanently
        # removes the chosen port so it won't be handed out again by
        # this process
        random.shuffle(PORT_POOL)
        port = PORT_POOL.pop()
    #
    # The address part could be an IP address (optionally including
    # wildcards to indicate a preference) or a hostname or nothing.
    # If it's a hostname we attempt to resolve it to an IP address.
    # It it's nothing or a wildcard we query the system for a matching IP address.
    #
    if (not host_or_ip) or is_valid_ip_pattern(host_or_ip):
        #
        # If a specific IP address is given, use that.
        # If an IP pattern is given (ie something with a wildcard in it) treat
        # that as no address with a preference for that wildcard.
        #
        prefer = None
        if "*" in host_or_ip:
            host_or_ip, prefer = None, [host_or_ip]
        #
        # If no IP (or only a wildcard) is specified, query the system for valid
        # addresses, preferring those which match the wildcard. NB if the preference
        # matches one we've previously used, we can return a cached address. But
        # different requests can specify different wildcard preferences.
        #
        if not host_or_ip:
            if _ip4 and _prefer == prefer:
                ip = _ip4
            else:
                ip = _find_ip4(prefer)
        else:
            ip = host_or_ip
    else:
        #
        # Treat the string as a hostname and resolve to an IP4 address
        #
        try:
            ip = socket.gethostbyname(host_or_ip)
        except socket.gaierror as exc:
            _logger.error("gaierror %d for %s", exc.errno, host_or_ip)
            raise InvalidAddressError(host_or_ip, exc.errno)
        else:
            #
            # Bizarrely specific check because BT Internet "helpfully"
            # redirects DNS fails to this address which hosts a sponsored
            # landing page!
            #
            if ip == "92.242.132.15":
                raise InvalidAddressError(host_or_ip, 0)
    return "%s:%s" % (ip, port)
def action_and_params(commandline):
    """Split a command line into an action plus its parameters.

    Uses shell-style tokenisation, so quoted parameters are kept whole.

    :param commandline: a string containing at least an action
    :returns: action, [param1, param2, ...]
    """
    parts = shlex.split(commandline)
    return parts[0], parts[1:]
def bytes_to_string(data):
    """Take bytes and return a base64-encoded unicode string equivalent

    :param data: a bytes object
    :returns: base64-encoded unicode object
    """
    encoded = base64.b64encode(data)
    return encoded.decode("ascii")
def string_to_bytes(data):
    """Take a base64-encoded unicode string and return the equivalent bytes

    :param data: a base64-encoded unicode object
    :returns: the equivalent bytes
    """
    raw = data.encode("ascii")
    return base64.b64decode(raw)
|
tjguk/networkzero | networkzero/core.py | _find_ip4_addresses | python | def _find_ip4_addresses():
global _ip4_addresses
proto = socket.AF_INET
if _ip4_addresses is None:
_ip4_addresses = []
#
# Determine the interface for the default gateway
# (if any) and, later, prioritise the INET address on
# that interface.
#
default_gateway = netifaces.gateways()['default']
if proto in default_gateway:
_, default_gateway_interface = default_gateway[proto]
else:
default_gateway_interface = None
for interface in netifaces.interfaces():
for info in netifaces.ifaddresses(interface).get(netifaces.AF_INET, []):
if info['addr']:
if interface == default_gateway_interface:
_ip4_addresses.insert(0, info['addr'])
else:
_ip4_addresses.append(info['addr'])
return _ip4_addresses | Find all the IP4 addresses currently bound to interfaces | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/core.py#L136-L163 | null | # -*- coding: utf-8 -*-
import base64
import fnmatch
import logging
import random
import shlex
import socket
try:
import netifaces
except ImportError:
warnings.warn("Unable to import netifaces; using local fallback")
from . import _netifaces as netifaces
from . import config
def get_logger(name):
    """Return a DEBUG-level logger for `name`.

    For now this is a thin wrapper around logging.getLogger; it exists
    as a single place to add handlers, filters etc. later.
    """
    named_logger = logging.getLogger(name)
    named_logger.setLevel(logging.DEBUG)
    return named_logger
_debug_logging_enabled = False
def _enable_debug_logging():
    """Attach a DEBUG-level file handler writing to "network.log" to the
    "networkzero" logger, at most once per process.

    NOTE: the file is opened in "w" mode, so each process truncates any
    previous log file in the current directory.
    """
    global _debug_logging_enabled
    if not _debug_logging_enabled:
        logger = logging.getLogger("networkzero")
        handler = logging.FileHandler("network.log", "w", encoding="utf-8")
        handler.setFormatter(logging.Formatter("%(asctime)s %(threadName)s %(name)s %(levelname)s %(message)s"))
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        # Guard against adding duplicate handlers on repeated calls
        _debug_logging_enabled = True
_logger = get_logger(__name__)
#
# Common exceptions
#
class NetworkZeroError(Exception):
pass
class SocketAlreadyExistsError(NetworkZeroError):
pass
class SocketTimedOutError(NetworkZeroError):
def __init__(self, n_seconds):
self.n_seconds = n_seconds
def __str__(self):
return "Gave up waiting after %s seconds; this connection is now unusable" % self.n_seconds
class SocketInterruptedError(NetworkZeroError):
def __init__(self, after_n_seconds):
self.after_n_seconds = after_n_seconds
def __str__(self):
return "Interrupted after %s seconds; this connection is now unusable" % self.after_n_seconds
class AddressError(NetworkZeroError):
pass
class NoAddressFoundError(AddressError):
pass
class InvalidAddressError(NetworkZeroError):
def __init__(self, address, errno=None):
self.address = address
self.errno = errno
def __str__(self):
message = "%s is not a valid address" % self.address
if self.errno:
message += "; the system returned an error of %d" % self.errno
return message
class DifferentThreadError(NetworkZeroError):
pass
#
# Ports in the range 0xc000..0xffff are reserved
# for dynamic allocation
#
PORT_POOL = list(config.DYNAMIC_PORTS)
def split_address(address):
if ":" in address:
ip, _, port = address.partition(":")
else:
if address.isdigit():
ip, port = "", address
else:
ip, port = address, ""
return ip, port
def is_valid_ip_pattern(ip):
"""Check whether a string matches the outline of an IPv4 address,
allowing "*" as a wildcard"""
ip = ip.replace('*', '1')
try:
socket.inet_aton(ip)
return True
except socket.error:
# Not a valid IPv4 address pattern
return False
def is_valid_port(port, port_range=range(65536)):
try:
return int(port) in port_range
except ValueError:
return False
def is_valid_address(address, port_range=range(65536)):
ip, port = split_address(address)
return is_valid_ip_pattern(ip) and is_valid_port(port, port_range)
def _find_ip4_broadcast_addresses():
    """Yield each IP4 broadcast address, and the all-broadcast

    The limited-broadcast address 255.255.255.255 is always yielded
    first, followed by the per-interface broadcast addresses reported
    by netifaces (only for interfaces which define one).
    """
    yield "255.255.255.255"
    for interface in netifaces.interfaces():
        ifaddresses = netifaces.ifaddresses(interface)
        for family in ifaddresses:
            if family == netifaces.AF_INET:
                address_info = ifaddresses[family]
                for info in address_info:
                    if "broadcast" in info:
                        yield info['broadcast']
_ip4_addresses = None
_ip4 = None
_prefer = None
def _find_ip4(prefer=None):
global _ip4, _prefer
#
# Order the list of possible addresses on the machine: if any
# address pattern is given as a preference (most -> least)
# give it that weighting, otherwise treat all addresses
# numerically. If no preference is given, prefer the most
# likely useful local address range.
#
if prefer:
_prefer = prefer
else:
_prefer = ["192.168.*"]
def sorter(ip4):
octets = [int(i) for i in ip4.split(".")]
for n, pattern in enumerate(_prefer):
if fnmatch.fnmatch(ip4, pattern):
return n, octets
else:
#
# Return the address itself if it doesn't match
# a preference
#
return n + 1, octets
ip4_addresses = _find_ip4_addresses()
#
# Pick an address allowing for user preference if stated
#
if not ip4_addresses:
raise NoAddressFoundError
else:
#
# Find the best match. If the user actually supplied a preference
# list, assume an exact match is required to at least one of the
# patterns.
#
ip4 = min(ip4_addresses, key=sorter)
if prefer and not any(fnmatch.fnmatch(ip4, pattern) for pattern in prefer):
raise NoAddressFoundError("No address matches any of: %s" % ", ".join(prefer))
else:
_ip4 = ip4
return _ip4
def address(address=None):
"""Convert one of a number of inputs into a valid ip:port string.
Elements which are not provided are filled in as follows:
* IP Address: the system is asked for the set of IP addresses associated
with the machine and the first one is used, preferring those matching
`address` if it is a wildcard.
* Port number: a random port is selected from the pool of dynamically-available
port numbers.
This means you can pass any of: nothing; a hostname; an IP address; an IP address with wildcards; a port number
If an IP address is supplied but is invalid, an InvalidAddressError
exception is raised.
:param address: (optional) Any of: an IP address, a port number, or both
:returns: a valid ip:port string for this machine
"""
address = str(address or "").strip()
#
# If the address is an ip:port pair, split into its component parts.
# Otherwise, try to determine whether we're looking at an IP
# or at a port and leave the other one blank
#
host_or_ip, port = split_address(address)
#
# If the port has been supplied, make sure it's numeric and that it's a valid
# port number. If it hasn't been supplied, remove a random one from the pool
# of possible dynamically-allocated ports and use that.
#
if port:
try:
port = int(port)
except ValueError:
raise AddressError("Port %s must be a number" % port)
if port not in config.VALID_PORTS:
raise AddressError("Port %d must be in range %d - %d" % (
port, min(config.VALID_PORTS), max(config.VALID_PORTS))
)
else:
random.shuffle(PORT_POOL)
port = PORT_POOL.pop()
#
# The address part could be an IP address (optionally including
# wildcards to indicate a preference) or a hostname or nothing.
# If it's a hostname we attempt to resolve it to an IP address.
# It it's nothing or a wildcard we query the system for a matching IP address.
#
if (not host_or_ip) or is_valid_ip_pattern(host_or_ip):
#
# If a specific IP address is given, use that.
# If an IP pattern is given (ie something with a wildcard in it) treat
# that as no address with a preference for that wildcard.
#
prefer = None
if "*" in host_or_ip:
host_or_ip, prefer = None, [host_or_ip]
#
# If no IP (or only a wildcard) is specified, query the system for valid
# addresses, preferring those which match the wildcard. NB if the preference
# matches one we've previously used, we can return a cached address. But
# different requests can specify different wildcard preferences.
#
if not host_or_ip:
if _ip4 and _prefer == prefer:
ip = _ip4
else:
ip = _find_ip4(prefer)
else:
ip = host_or_ip
else:
#
# Treat the string as a hostname and resolve to an IP4 address
#
try:
ip = socket.gethostbyname(host_or_ip)
except socket.gaierror as exc:
_logger.error("gaierror %d for %s", exc.errno, host_or_ip)
raise InvalidAddressError(host_or_ip, exc.errno)
else:
#
# Bizarrely specific check because BT Internet "helpfully"
# redirects DNS fails to this address which hosts a sponsored
# landing page!
#
if ip == "92.242.132.15":
raise InvalidAddressError(host_or_ip, 0)
return "%s:%s" % (ip, port)
def action_and_params(commandline):
"""Treat a command line as an action followed by parameter
:param commandline: a string containing at least an action
:returns: action, [param1, param2, ...]
"""
components = shlex.split(commandline)
return components[0], components[1:]
def bytes_to_string(data):
"""Take bytes and return a base64-encoded unicode string equivalent
:param data: a bytes object
:returns: base64-encoded unicode object
"""
return base64.b64encode(data).decode("ascii")
def string_to_bytes(data):
"""Take a base64-encoded unicode string and return the equivalent bytes
:param data: a base64-encoded unicode object
:returns: the equivalent bytes
"""
return base64.b64decode(data.encode("ascii"))
|
tjguk/networkzero | networkzero/core.py | address | python | def address(address=None):
address = str(address or "").strip()
#
# If the address is an ip:port pair, split into its component parts.
# Otherwise, try to determine whether we're looking at an IP
# or at a port and leave the other one blank
#
host_or_ip, port = split_address(address)
#
# If the port has been supplied, make sure it's numeric and that it's a valid
# port number. If it hasn't been supplied, remove a random one from the pool
# of possible dynamically-allocated ports and use that.
#
if port:
try:
port = int(port)
except ValueError:
raise AddressError("Port %s must be a number" % port)
if port not in config.VALID_PORTS:
raise AddressError("Port %d must be in range %d - %d" % (
port, min(config.VALID_PORTS), max(config.VALID_PORTS))
)
else:
random.shuffle(PORT_POOL)
port = PORT_POOL.pop()
#
# The address part could be an IP address (optionally including
# wildcards to indicate a preference) or a hostname or nothing.
# If it's a hostname we attempt to resolve it to an IP address.
# It it's nothing or a wildcard we query the system for a matching IP address.
#
if (not host_or_ip) or is_valid_ip_pattern(host_or_ip):
#
# If a specific IP address is given, use that.
# If an IP pattern is given (ie something with a wildcard in it) treat
# that as no address with a preference for that wildcard.
#
prefer = None
if "*" in host_or_ip:
host_or_ip, prefer = None, [host_or_ip]
#
# If no IP (or only a wildcard) is specified, query the system for valid
# addresses, preferring those which match the wildcard. NB if the preference
# matches one we've previously used, we can return a cached address. But
# different requests can specify different wildcard preferences.
#
if not host_or_ip:
if _ip4 and _prefer == prefer:
ip = _ip4
else:
ip = _find_ip4(prefer)
else:
ip = host_or_ip
else:
#
# Treat the string as a hostname and resolve to an IP4 address
#
try:
ip = socket.gethostbyname(host_or_ip)
except socket.gaierror as exc:
_logger.error("gaierror %d for %s", exc.errno, host_or_ip)
raise InvalidAddressError(host_or_ip, exc.errno)
else:
#
# Bizarrely specific check because BT Internet "helpfully"
# redirects DNS fails to this address which hosts a sponsored
# landing page!
#
if ip == "92.242.132.15":
raise InvalidAddressError(host_or_ip, 0)
return "%s:%s" % (ip, port) | Convert one of a number of inputs into a valid ip:port string.
Elements which are not provided are filled in as follows:
* IP Address: the system is asked for the set of IP addresses associated
with the machine and the first one is used, preferring those matching
`address` if it is a wildcard.
* Port number: a random port is selected from the pool of dynamically-available
port numbers.
This means you can pass any of: nothing; a hostname; an IP address; an IP address with wildcards; a port number
If an IP address is supplied but is invalid, an InvalidAddressError
exception is raised.
:param address: (optional) Any of: an IP address, a port number, or both
:returns: a valid ip:port string for this machine | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/core.py#L212-L306 | [
"def split_address(address):\n if \":\" in address:\n ip, _, port = address.partition(\":\")\n else:\n if address.isdigit():\n ip, port = \"\", address\n else:\n ip, port = address, \"\"\n return ip, port\n",
"def is_valid_ip_pattern(ip):\n \"\"\"Check whethe... | # -*- coding: utf-8 -*-
import base64
import fnmatch
import logging
import random
import shlex
import socket
try:
import netifaces
except ImportError:
warnings.warn("Unable to import netifaces; using local fallback")
from . import _netifaces as netifaces
from . import config
def get_logger(name):
#
# For now, this is just a hand-off to logging.getLogger
# Later, though, we might want to add a null handler etc.
#
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
return logger
_debug_logging_enabled = False
def _enable_debug_logging():
global _debug_logging_enabled
if not _debug_logging_enabled:
logger = logging.getLogger("networkzero")
handler = logging.FileHandler("network.log", "w", encoding="utf-8")
handler.setFormatter(logging.Formatter("%(asctime)s %(threadName)s %(name)s %(levelname)s %(message)s"))
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
_debug_logging_enabled = True
_logger = get_logger(__name__)
#
# Common exceptions
#
class NetworkZeroError(Exception):
pass
class SocketAlreadyExistsError(NetworkZeroError):
pass
class SocketTimedOutError(NetworkZeroError):
def __init__(self, n_seconds):
self.n_seconds = n_seconds
def __str__(self):
return "Gave up waiting after %s seconds; this connection is now unusable" % self.n_seconds
class SocketInterruptedError(NetworkZeroError):
def __init__(self, after_n_seconds):
self.after_n_seconds = after_n_seconds
def __str__(self):
return "Interrupted after %s seconds; this connection is now unusable" % self.after_n_seconds
class AddressError(NetworkZeroError):
pass
class NoAddressFoundError(AddressError):
pass
class InvalidAddressError(NetworkZeroError):
def __init__(self, address, errno=None):
self.address = address
self.errno = errno
def __str__(self):
message = "%s is not a valid address" % self.address
if self.errno:
message += "; the system returned an error of %d" % self.errno
return message
class DifferentThreadError(NetworkZeroError):
pass
#
# Ports in the range 0xc000..0xffff are reserved
# for dynamic allocation
#
PORT_POOL = list(config.DYNAMIC_PORTS)
def split_address(address):
if ":" in address:
ip, _, port = address.partition(":")
else:
if address.isdigit():
ip, port = "", address
else:
ip, port = address, ""
return ip, port
def is_valid_ip_pattern(ip):
"""Check whether a string matches the outline of an IPv4 address,
allowing "*" as a wildcard"""
ip = ip.replace('*', '1')
try:
socket.inet_aton(ip)
return True
except socket.error:
# Not a valid IPv4 address pattern
return False
def is_valid_port(port, port_range=range(65536)):
try:
return int(port) in port_range
except ValueError:
return False
def is_valid_address(address, port_range=range(65536)):
ip, port = split_address(address)
return is_valid_ip_pattern(ip) and is_valid_port(port, port_range)
def _find_ip4_broadcast_addresses():
"""Yield each IP4 broadcast address, and the all-broadcast
"""
yield "255.255.255.255"
for interface in netifaces.interfaces():
ifaddresses = netifaces.ifaddresses(interface)
for family in ifaddresses:
if family == netifaces.AF_INET:
address_info = ifaddresses[family]
for info in address_info:
if "broadcast" in info:
yield info['broadcast']
_ip4_addresses = None
def _find_ip4_addresses():
"""Find all the IP4 addresses currently bound to interfaces
"""
global _ip4_addresses
proto = socket.AF_INET
if _ip4_addresses is None:
_ip4_addresses = []
#
# Determine the interface for the default gateway
# (if any) and, later, prioritise the INET address on
# that interface.
#
default_gateway = netifaces.gateways()['default']
if proto in default_gateway:
_, default_gateway_interface = default_gateway[proto]
else:
default_gateway_interface = None
for interface in netifaces.interfaces():
for info in netifaces.ifaddresses(interface).get(netifaces.AF_INET, []):
if info['addr']:
if interface == default_gateway_interface:
_ip4_addresses.insert(0, info['addr'])
else:
_ip4_addresses.append(info['addr'])
return _ip4_addresses
_ip4 = None
_prefer = None
def _find_ip4(prefer=None):
global _ip4, _prefer
#
# Order the list of possible addresses on the machine: if any
# address pattern is given as a preference (most -> least)
# give it that weighting, otherwise treat all addresses
# numerically. If no preference is given, prefer the most
# likely useful local address range.
#
if prefer:
_prefer = prefer
else:
_prefer = ["192.168.*"]
def sorter(ip4):
octets = [int(i) for i in ip4.split(".")]
for n, pattern in enumerate(_prefer):
if fnmatch.fnmatch(ip4, pattern):
return n, octets
else:
#
# Return the address itself if it doesn't match
# a preference
#
return n + 1, octets
ip4_addresses = _find_ip4_addresses()
#
# Pick an address allowing for user preference if stated
#
if not ip4_addresses:
raise NoAddressFoundError
else:
#
# Find the best match. If the user actually supplied a preference
# list, assume an exact match is required to at least one of the
# patterns.
#
ip4 = min(ip4_addresses, key=sorter)
if prefer and not any(fnmatch.fnmatch(ip4, pattern) for pattern in prefer):
raise NoAddressFoundError("No address matches any of: %s" % ", ".join(prefer))
else:
_ip4 = ip4
return _ip4
def action_and_params(commandline):
"""Treat a command line as an action followed by parameter
:param commandline: a string containing at least an action
:returns: action, [param1, param2, ...]
"""
components = shlex.split(commandline)
return components[0], components[1:]
def bytes_to_string(data):
"""Take bytes and return a base64-encoded unicode string equivalent
:param data: a bytes object
:returns: base64-encoded unicode object
"""
return base64.b64encode(data).decode("ascii")
def string_to_bytes(data):
"""Take a base64-encoded unicode string and return the equivalent bytes
:param data: a base64-encoded unicode object
:returns: the equivalent bytes
"""
return base64.b64decode(data.encode("ascii"))
|
tjguk/networkzero | networkzero/discovery.py | _bind_with_timeout | python | def _bind_with_timeout(bind_function, args, n_tries=3, retry_interval_s=0.5):
n_tries_left = n_tries
while n_tries_left > 0:
try:
return bind_function(*args)
except zmq.error.ZMQError as exc:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
except OSError as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
else:
raise
else:
raise core.SocketAlreadyExistsError("Failed to bind after %s tries" % n_tries) | Attempt to bind a socket a number of times with a short interval in between
Especially on Linux, crashing out of a networkzero process can leave the sockets
lingering and unable to re-bind on startup. We give it a few goes here to see if
we can bind within a couple of seconds. | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L80-L101 | null | # -*- coding: utf-8 -*-
"""Advertise and collect advertisements of network services
The discovery module offers:
* A UDP broadcast socket which:
- Listens for and keeps track of service adverts from this and other
machines & processes
- Broadcasts services advertised by this process
* A ZeroMQ socket which allow any process on this machine to
communicate with its broadcast socket
In other words, we have a beacon which listens to instructions
from processes on this machine while sending out and listening
to adverts broadcast to/from all machines on the network.
The beacon is started automatically in a daemon thread when the first
attempt is made to advertise or discover. If another process already
has a beacon running (ie if this beacon can't bind to its port) this
beacon thread will shut down with no further action.
The module-level functions to advertise and discover will open a connection
to a ZeroMQ socket on this machine (which might be hosted by this or by another
process) and will use this socket to send commands to the beacon thread which
will update or return its internal list of advertised services.
As an additional convenience, the :func:`advertise` function will, if given no
specific address, generate a suitable ip:port pair by interrogating the system.
This functionality is actually in :func:`networkzero.address` (qv).
"""
from __future__ import print_function
import os, sys
import atexit
import collections
import errno
import json
import logging
import socket
import threading
import time
import zmq
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
#
# Continue is a sentinel value to indicate that a command
# has completed its scheduled slice without producing a result
# and without exceeding its overall timeout.
#
Continue = object()
#
# Empty is a sentinel to distinguish between no result and a result of None
#
Empty = object()
def _unpack(message):
    """Decode a wire message (bytes) into a Python object via JSON."""
    text = message.decode(config.ENCODING)
    return json.loads(text)
def _pack(message):
    """Encode a Python object as JSON bytes ready for the wire."""
    text = json.dumps(message)
    return text.encode(config.ENCODING)
def timed_out(started_at, wait_for_s):
    """Return True if more than `wait_for_s` seconds have passed since
    `started_at`.

    The sentinel value config.FOREVER never times out.
    """
    if wait_for_s is config.FOREVER:
        return False
    return time.time() > started_at + wait_for_s
def _bind_with_timeout(bind_function, args, n_tries=3, retry_interval_s=0.5):
    """Attempt to bind a socket a number of times with a short interval in between

    Especially on Linux, crashing out of a networkzero process can leave the sockets
    lingering and unable to re-bind on startup. We give it a few goes here to see if
    we can bind within a couple of seconds.

    :param bind_function: callable performing the bind (e.g. socket.bind)
    :param args: tuple of arguments to pass to `bind_function`
    :param n_tries: how many attempts to make before giving up
    :param retry_interval_s: pause between attempts, in seconds
    :raises SocketAlreadyExistsError: if every attempt fails with
        an address-in-use condition
    """
    n_tries_left = n_tries
    while n_tries_left > 0:
        try:
            return bind_function(*args)
        except zmq.error.ZMQError as exc:
            _logger.warn("%s; %d tries remaining", exc, n_tries_left)
            n_tries_left -= 1
        except OSError as exc:
            if exc.errno == errno.EADDRINUSE:
                _logger.warn("%s; %d tries remaining", exc, n_tries_left)
                n_tries_left -= 1
            else:
                # Any other OS error is unexpected: propagate immediately
                raise
        #
        # BUGFIX: previously `retry_interval_s` was accepted but never
        # used, so all the retries fired back-to-back and a lingering
        # socket had no time to be released by the OS. Pause between
        # attempts (but not after the final failure).
        #
        if n_tries_left > 0:
            time.sleep(retry_interval_s)
    else:
        raise core.SocketAlreadyExistsError("Failed to bind after %s tries" % n_tries)
class _Service(object):
    """Convenience container with details of a service to be advertised

    Includes the name, address and when it is next due to be advertised
    and when it is due to expire if it was discovered.
    """
    def __init__(self, name, address, ttl_s=None):
        # The advertised service name; equality is by name only (see __eq__)
        self.name = name
        # Canonical "ip:port" string for the service
        self.address = address
        # Time-to-live in seconds; None means the advert never expires
        self.ttl_s = ttl_s
        self.expires_at = None if ttl_s is None else (time.time() + ttl_s)
        # 0 means "due to advertise immediately"
        self.advertise_at = 0
    def __str__(self):
        # NOTE(review): time.ctime(None) formats the *current* time, so a
        # never-expiring service prints a misleading expiry -- confirm intent
        return "_Service %s at %s due to advertise at %s and expire at %s" % (
            self.name, self.address,
            time.ctime(self.advertise_at), time.ctime(self.expires_at)
        )
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, str(self))
    def __eq__(self, other):
        # Compare on name alone. NB defining __eq__ without __hash__
        # makes instances unhashable under Python 3.
        return self.name == other.name
class _Command(object):
    """Convenience container with details of a running command

    Includes the action ("discover", "advertise" etc.), its parameters, when
    it was started -- for timeout purposes -- and any response.

    This is used by the process_command functionality
    """
    def __init__(self, action, params):
        self.action = action
        self.params = params
        # Recorded so callers can decide when the command has timed out
        self.started_at = time.time()
        # Starts as the module-level Empty sentinel so a genuine result
        # of None can be distinguished from "no result yet"
        self.response = Empty
    def __str__(self):
        return "_Command: %s (%s) started at %s -> %s" % (self.action, self.params, time.ctime(self.started_at), self.response)
class _Beacon(threading.Thread):
"""Threaded beacon to: listen for adverts & broadcast adverts
"""
rpc_port = 9998
beacon_port = 9999
finder_timeout_s = 0.05
beacon_message_size = 256
time_between_broadcasts_s = config.BEACON_ADVERT_FREQUENCY_S
def __init__(self, beacon_port=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self._stop_event = threading.Event()
self._is_paused = False
self.beacon_port = beacon_port or self.__class__.beacon_port
_logger.debug("Using beacon port %s", self.beacon_port)
#
# Services we're advertising
#
self._services_to_advertise = collections.deque()
#
# Broadcast adverts which we've received (some of which will be our own)
#
self._services_found = {}
#
# _Command requests are collected on one queue
# _Command responses are added to another
#
self._command = None
#
# Set the socket up to broadcast datagrams over UDP
#
self.broadcast_addresses = set(core._find_ip4_broadcast_addresses())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.bind(("", self.beacon_port))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
self.socket_fd = self.socket.fileno()
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.rpc = sockets.context.socket(zmq.REP)
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
self.rpc.linger = 1000
_bind_with_timeout(self.rpc.bind, ("tcp://127.0.0.1:%s" % self.rpc_port,))
def stop(self):
_logger.debug("About to stop")
self._stop_event.set()
#
# Commands available via RPC are methods whose name starts with "do_"
#
def do_advertise(self, started_at, name, address, fail_if_exists, ttl_s):
_logger.debug("Advertise %s on %s %s TTL=%s", name, address, fail_if_exists, ttl_s)
canonical_address = core.address(address)
for service in self._services_to_advertise:
if service.name == name:
if fail_if_exists:
_logger.error("_Service %s already exists on %s", name, service.address)
return None
else:
_logger.warn("Superseding service %s which already exists on %s", name, service.address)
service = _Service(name, canonical_address, ttl_s)
self._services_to_advertise.append(service)
#
# As a shortcut, automatically "discover" any services we ourselves are advertising
#
self._services_found[name] = service
return canonical_address
def do_unadvertise(self, started_at, name):
    """RPC handler: drop the first advert registered under `name`.

    Logs a warning when no advert by that name is currently held.
    """
    _logger.debug("Unadvertise %s", name)
    matching = [entry for entry in self._services_to_advertise if entry.name == name]
    if matching:
        self._services_to_advertise.remove(matching[0])
    else:
        _logger.warn("No advert found for %s", name)
    _logger.debug("Services now: %s", self._services_to_advertise)
def do_pause(self, started_at):
    """RPC handler: stop broadcasting adverts.

    Reception and expiry of adverts continue while paused; only
    outgoing broadcasts are suppressed (see run()).
    """
    _logger.debug("Pause")
    self._is_paused = True
def do_resume(self, started_at):
    """RPC handler: resume broadcasting adverts after a pause."""
    _logger.debug("Resume")
    self._is_paused = False
def do_discover(self, started_at, name, wait_for_s):
    """RPC handler: look up `name` among the services found so far.

    Returns the service's address when known. Otherwise returns None
    once the wait has timed out, or the Continue sentinel so the
    command stays queued and is retried on the next loop pass.
    """
    _logger.debug("Discover %s waiting for %s seconds", name, wait_for_s)
    match = self._services_found.get(name)
    if match is not None:
        return match.address
    return None if timed_out(started_at, wait_for_s) else Continue
def do_discover_all(self, started_at):
    """RPC handler: list every known service as (name, address) pairs."""
    _logger.debug("Discover all")
    return [(entry.name, entry.address) for entry in self._services_found.values()]
def do_reset(self, started_at):
    """RPC handler: forget all found and advertised services.

    Broadcasting is paused around the clears so a half-cleared state
    is never advertised, then resumed.
    """
    _logger.debug("Reset")
    self.do_pause(started_at)
    self._services_found.clear()
    self._services_to_advertise.clear()
    self.do_resume(started_at)
def do_stop(self, started_at):
    """RPC handler: shut the beacon thread down (delegates to stop())."""
    _logger.debug("Stop")
    self.stop()
def listen_for_one_advert(self):
    """Receive at most one broadcast advert and record it.

    Polls the UDP socket for up to finder_timeout_s. If a datagram
    arrived, it is unpacked as (name, address, ttl_s) and the service
    is (re)registered, which also resets its expiry clock.
    """
    # zmq.Poller reports a registered raw socket under its file
    # descriptor, hence the lookup by self.socket_fd rather than by
    # the socket object itself.
    events = dict(self.poller.poll(1000 * self.finder_timeout_s))
    if self.socket_fd not in events:
        return
    message, source = self.socket.recvfrom(self.beacon_message_size)
    _logger.debug("Broadcast message received: %r", message)
    service_name, service_address, ttl_s = _unpack(message)
    service = _Service(service_name, service_address, ttl_s)
    self._services_found[service_name] = service
def broadcast_one_advert(self):
    """Broadcast the next advert that is due, then rotate the queue.

    The deque of services is used as a round-robin schedule: the head
    service is broadcast if its advertise_at time has arrived, its next
    slot is pushed time_between_broadcasts_s into the future, and the
    deque is rotated so every service gets a turn at the head.
    """
    if self._services_to_advertise:
        next_service = self._services_to_advertise[0]
        if next_service.advertise_at < time.time():
            _logger.debug("%s due to advertise at %s", next_service.name, time.ctime(next_service.advertise_at))
            message = _pack([next_service.name, next_service.address, next_service.ttl_s])
            # Send the same datagram on every broadcast address of this host.
            for broadcast_address in self.broadcast_addresses:
                _logger.debug("Advertising on %s", broadcast_address)
                self.socket.sendto(message, 0, (broadcast_address, self.beacon_port))
            next_service.advertise_at = time.time() + self.time_between_broadcasts_s
        self._services_to_advertise.rotate(-1)
def remove_expired_adverts(self):
    """Drop discovered services whose TTL has passed.

    A service whose expires_at is None never expires. Iterates over a
    snapshot of the items so entries can be deleted during the walk.
    """
    for svc_name, svc in list(self._services_found.items()):
        if svc.expires_at is not None and svc.expires_at <= time.time():
            _logger.warn("Removing advert for %s which expired at %s",
                svc_name, time.ctime(svc.expires_at))
            del self._services_found[svc_name]
def poll_command_request(self):
    """If the command RPC socket has an incoming request,
    separate it into its action and its params and store it
    as the current pending command.
    """
    try:
        # Non-blocking receive: the beacon loop must not stall here.
        message = self.rpc.recv(zmq.NOBLOCK)
    except zmq.ZMQError as exc:
        if exc.errno == zmq.EAGAIN:
            # Nothing waiting; try again on the next loop pass.
            return
        else:
            raise
    _logger.debug("Received command %s", message)
    # Wire format: a JSON list of [action, param, param, ...]
    segments = _unpack(message)
    action, params = segments[0], segments[1:]
    _logger.debug("Adding %s, %s to the request queue", action, params)
    self._command = _Command(action, params)
def process_command(self):
    """Run the pending RPC command, if any, and record its result.

    Dispatches to the matching do_<action> method. A handler returning
    the Continue sentinel leaves the command pending so it is retried
    on the next loop iteration (e.g. a discovery still within its
    timeout). Any other result becomes the command's response, ready
    for poll_command_reponse() to send back.

    :raises NotImplementedError: if no do_<action> handler exists
    """
    if not self._command:
        return
    else:
        _logger.debug("process_command: %s", self._command.action)
    command = self._command
    _logger.debug("Picked %s, %s, %s", self._command.action, self._command.params, self._command.started_at)
    function = getattr(self, "do_" + command.action.lower(), None)
    if not function:
        # BUGFIX: the format string previously had no argument supplied,
        # so the error message contained a literal "%s".
        raise NotImplementedError("%s is not a valid action" % command.action)
    else:
        try:
            result = function(command.started_at, *command.params)
        # Narrowed from a bare except: let KeyboardInterrupt/SystemExit
        # propagate; a failed handler is logged and answered with None.
        except Exception:
            _logger.exception("Problem calling %s with %s", command.action, command.params)
            result = None
        _logger.debug("result = %s", result)
        #
        # result will be Continue if the action cannot be completed
        # (eg a discovery) but its time is not yet expired. Leave
        # the command on the stack for now.
        #
        if result is Continue:
            return
        #
        # Otherwise store the result as the command's response.
        #
        self._command.response = result
def poll_command_reponse(self):
    """If the current command has a response, issue it as the
    reply on the RPC socket and clear the command slot.

    NOTE(review): the method name is a long-standing typo of
    "response"; it is kept because run() (and potentially external
    code) calls it by this name.
    """
    # Empty is the "no response yet" sentinel -- it distinguishes a
    # pending command from one whose genuine result is None.
    if self._command.response is not Empty:
        _logger.debug("Sending response %s", self._command.response)
        self.rpc.send(_pack(self._command.response))
        self._command = None
def run(self):
    """Main beacon loop: service RPC commands, broadcast and collect
    adverts, and expire stale ones, until stop() is called.

    Runs in a daemon thread. Any unexpected exception is logged and
    ends the loop rather than killing the interpreter silently.
    """
    _logger.info("Starting discovery")
    # wait(0) is a non-blocking check of the stop event.
    while not self._stop_event.wait(0):
        try:
            #
            # If we're not already processing one, check for an command
            # to advertise/discover from a local process.
            #
            if not self._command:
                self.poll_command_request()
            #
            # If we're paused no adverts will be broadcast. Adverts
            # will be received and stale ones expired
            #
            if not self._is_paused:
                #
                # Broadcast the first advert whose advertising schedule
                # has arrived
                #
                self.broadcast_one_advert()
            #
            # See if an advert broadcast has arrived
            #
            self.listen_for_one_advert()
            #
            # See if any adverts have expired
            #
            self.remove_expired_adverts()
            #
            # If we're processing a command, see if it's complete
            #
            if self._command:
                self.process_command()
                self.poll_command_reponse()
        except:
            _logger.exception("Problem in beacon thread")
            break
    _logger.info("Ending discovery")
    self.rpc.close()
    self.socket.close()
# The per-process beacon singleton: None until first use, a _Beacon
# instance when this process hosts the beacon, or _remote_beacon when
# another process on this machine already owns the beacon ports.
_beacon = None
# Not-None sentinel meaning "a beacon exists, but in another process".
_remote_beacon = object()
def _start_beacon(port=None):
    """Start a beacon thread within this process if no beacon is currently
    running on this machine.

    In general this is called automatically when an attempt is made to
    advertise or discover. It might be convenient, though, to call this
    function directly if you want to have a process whose only job is
    to host this beacon so that it doesn't shut down when other processes
    shut down.

    :param port: optional UDP broadcast port for the beacon (default
        is _Beacon.beacon_port)
    """
    global _beacon
    if _beacon is None:
        _logger.debug("About to start beacon with port %s", port)
        try:
            _beacon = _Beacon(port)
        except (OSError, socket.error) as exc:
            # EADDRINUSE means another process on this machine already
            # bound the beacon port: use it remotely instead of failing.
            if exc.errno == errno.EADDRINUSE:
                _logger.warn("Beacon already active on this machine")
                #
                # _remote_beacon is simply a not-None sentinel value
                # to distinguish between the case where we have not
                # yet started a beacon and where we have found one
                # in another process.
                #
                _beacon = _remote_beacon
            else:
                raise
        else:
            _beacon.start()
def _stop_beacon():
    """Shut down a locally-hosted beacon (mostly for testing).

    Clears the module-level singleton so the next advertise/discover
    starts afresh. A beacon owned by another process is left running.
    """
    global _beacon
    if not _beacon or _beacon is _remote_beacon:
        _beacon = None
        return
    _beacon.stop()
    _beacon.join()
    _beacon = None
def _rpc(action, *args, **kwargs):
    """Send one command to the local beacon over its REQ/REP socket
    and return the unpacked reply.

    :param action: command name understood by the beacon (do_<action>)
    :param args: positional parameters for the command
    :param wait_for_s: keyword-only; seconds to wait for a reply [5]
    :raises: a socket-timeout error if no reply arrives in time
    """
    _logger.debug("About to send rpc request %s with args %s, kwargs %s", action, args, kwargs)
    wait_for_s = kwargs.pop("wait_for_s", 5)
    with sockets.context.socket(zmq.REQ) as socket:
        # A fresh REQ socket per call keeps the strict send/recv
        # lockstep simple; the context manager closes it afterwards.
        # (Note: unlike the beacon's REP socket, no linger is set here.)
        socket.connect("tcp://localhost:%s" % _Beacon.rpc_port)
        socket.send(_pack([action] + list(args)))
        reply = sockets._sockets._receive_with_timeout(socket, wait_for_s)
    return _unpack(reply)
def _pause():
    # Ask the beacon to stop broadcasting adverts (reception continues).
    return _rpc("pause")

def _resume():
    # Ask the beacon to resume broadcasting adverts.
    return _rpc("resume")
# name -> address of every advert registered by THIS process, so the
# atexit handler can unadvertise them when the process exits.
_services_advertised = {}
def advertise(name, address=None, fail_if_exists=False, ttl_s=config.ADVERT_TTL_S):
    """Advertise a name at an address

    Start to advertise service `name` at address `address`. If
    the address is not supplied, one is constructed and this is
    returned by the function. ie this is a typical use::

        address = nw0.advertise("myservice")

    :param name: any text
    :param address: either "ip:port" or None
    :param fail_if_exists: fail if this name is already registered?
    :param ttl_s: the advert will persist for this many seconds on other beacons
    :returns: the address given or constructed (None if registration was refused)
    """
    _start_beacon()
    address = _rpc("advertise", name, address, fail_if_exists, ttl_s)
    # Remember our own adverts so they can be withdrawn at exit.
    _services_advertised[name] = address
    return address
def _unadvertise_all():
    """Remove all adverts registered by this process.

    Registered with atexit below; gives up on the first timeout since
    the beacon is probably gone too.
    """
    for name in _services_advertised:
        try:
            _unadvertise(name)
        except core.SocketTimedOutError:
            _logger.warn("Timed out trying to unadvertise")
            break
# Withdraw our adverts automatically on interpreter shutdown.
atexit.register(_unadvertise_all)
def _unadvertise(name):
    """Remove the advert for a name

    This is intended for internal use only at the moment. When a process
    exits it can remove adverts for its services from the beacon running
    on that machine. (Of course, if the beacon thread is part of of the
    same service, all its adverts will cease).

    :param name: the advertised service name to withdraw
    """
    _start_beacon()
    return _rpc("unadvertise", name)
def discover(name, wait_for_s=60):
    """Discover a service by name.

    Look for an advert to a named service::

        address = nw0.discover("myservice")

    :param name: any text
    :param wait_for_s: how many seconds to wait before giving up
    :returns: the address found or None
    """
    _start_beacon()
    #
    # Two processes can deadlock if each waits on a long-running
    # discovery before the other's advertisement can be queued on the
    # beacon's RPC socket. Issuing the discovery in short 0.5s bursts
    # lets advertise and discover requests interleave.
    #
    started = time.time()
    while True:
        address = _rpc("discover", name, 0.5)
        if address:
            return address
        if timed_out(started, wait_for_s):
            return None
def discover_all():
    """Produce a list of all known services and their addresses

    Ask for all known services as a list of 2-tuples: (name, address)

    This could, eg, be used to form a dictionary of services::

        services = dict(nw0.discover_all())

    :returns: a list of 2-tuples [(name, address), ...]
    """
    _start_beacon()
    return _rpc("discover_all")
def discover_group(group, separator="/", exclude=None):
    """Produce a list of all services and their addresses in a group.

    A group is an optional form of namespace within the discovery
    mechanism: an advertised name of the form <group><sep><name> is
    deemed to belong to <group>, though the service's full name is
    still the whole string. This is purely a discovery-time convention,
    eg for differentiation within a classroom group.

    :param group: the name of a group prefix
    :param separator: the separator character [/]
    :param exclude: an iterable of names to exclude (or None)
    :returns: a list of 2-tuples [(name, address), ...]
    """
    _start_beacon()
    names_to_exclude = set() if exclude is None else set(exclude)
    prefix = "%s%s" % (group, separator)
    return [
        (name, address)
        for name, address in _rpc("discover_all")
        if name.startswith(prefix) and name not in names_to_exclude
    ]
def reset_beacon():
    """Clear the adverts which the beacon is carrying

    (This is mostly useful when testing, to get a fresh start)

    :returns: the RPC reply from the beacon's reset command
    """
    _start_beacon()
    return _rpc("reset")
if __name__ == '__main__':
    # Run a standalone beacon host: `python -m networkzero.discovery
    # [--debug]`. The process does nothing but keep the beacon alive.
    params = [arg.lower() for arg in sys.argv]
    if "--debug" in params:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    handler = logging.StreamHandler()
    handler.setLevel(logging_level)
    handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
    _logger.addHandler(handler)
    _start_beacon()
    _logger.info("Beacon started at %s", time.asctime())
    # The beacon runs in a daemon thread; sleep forever to keep the
    # process (and therefore the beacon) alive until interrupted.
    while True:
        time.sleep(1)
|
tjguk/networkzero | networkzero/discovery.py | _start_beacon | python | def _start_beacon(port=None):
global _beacon
if _beacon is None:
_logger.debug("About to start beacon with port %s", port)
try:
_beacon = _Beacon(port)
except (OSError, socket.error) as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("Beacon already active on this machine")
#
# _remote_beacon is simply a not-None sentinel value
# to distinguish between the case where we have not
# yet started a beacon and where we have found one
# in another process.
#
_beacon = _remote_beacon
else:
raise
else:
_beacon.start() | Start a beacon thread within this process if no beacon is currently
running on this machine.
In general this is called automatically when an attempt is made to
advertise or discover. It might be convenient, though, to call this
function directly if you want to have a process whose only job is
to host this beacon so that it doesn't shut down when other processes
shut down. | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L433-L461 | null | # -*- coding: utf-8 -*-
"""Advertise and collect advertisements of network services
The discovery module offers:
* A UDP broadcast socket which:
- Listens for and keeps track of service adverts from this and other
machines & processes
- Broadcasts services advertised by this process
* A ZeroMQ socket which allow any process on this machine to
communicate with its broadcast socket
In other words, we have a beacon which listens to instructions
from processes on this machine while sending out and listening
to adverts broadcast to/from all machines on the network.
The beacon is started automatically in a daemon thread when the first
attempt is made to advertise or discover. If another process already
has a beacon running (ie if this beacon can't bind to its port) this
beacon thread will shut down with no further action.
The module-level functions to advertise and discover will open a connection
to a ZeroMQ socket on this machine (which might be hosted by this or by another
process) and will use this socket to send commands to the beacon thread which
will update or return its internal list of advertised services.
As an additional convenience, the :func:`advertise` function will, if given no
specific address, generate a suitable ip:port pair by interrogating the system.
This functionality is actually in :func:`networkzero.address` (qv).
"""
from __future__ import print_function
import os, sys
import atexit
import collections
import errno
import json
import logging
import socket
import threading
import time
import zmq
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
#
# Continue is a sentinel value to indicate that a command
# has completed its scheduled slice without producing a result
# and without exceeding its overall timeout.
#
Continue = object()
#
# Empty is a sentinel to distinguish between no result and a result of None
#
Empty = object()
def _unpack(message):
return json.loads(message.decode(config.ENCODING))
def _pack(message):
return json.dumps(message).encode(config.ENCODING)
def timed_out(started_at, wait_for_s):
#
# If the wait time is the sentinel value FOREVER, never time out
# Otherwise time out if the current time is more than wait_for_s seconds after the start time
#
if wait_for_s is config.FOREVER:
return False
else:
return time.time() > started_at + wait_for_s
def _bind_with_timeout(bind_function, args, n_tries=3, retry_interval_s=0.5):
"""Attempt to bind a socket a number of times with a short interval in between
Especially on Linux, crashing out of a networkzero process can leave the sockets
lingering and unable to re-bind on startup. We give it a few goes here to see if
we can bind within a couple of seconds.
"""
n_tries_left = n_tries
while n_tries_left > 0:
try:
return bind_function(*args)
except zmq.error.ZMQError as exc:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
except OSError as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
else:
raise
else:
raise core.SocketAlreadyExistsError("Failed to bind after %s tries" % n_tries)
class _Service(object):
"""Convenience container with details of a service to be advertised
Includes the name, address and when it is next due to be advertised
and when it is due to expire if it was discovered.
"""
def __init__(self, name, address, ttl_s=None):
self.name = name
self.address = address
self.ttl_s = ttl_s
self.expires_at = None if ttl_s is None else (time.time() + ttl_s)
self.advertise_at = 0
def __str__(self):
return "_Service %s at %s due to advertise at %s and expire at %s" % (
self.name, self.address,
time.ctime(self.advertise_at), time.ctime(self.expires_at)
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __eq__(self, other):
return self.name == other.name
class _Command(object):
"""Convenience container with details of a running command
Includes the action ("discover", "advertise" etc.), its parameters, when
it was started -- for timeout purposes -- and any response.
This is used by the process_command functionality
"""
def __init__(self, action, params):
self.action = action
self.params = params
self.started_at = time.time()
self.response = Empty
def __str__(self):
return "_Command: %s (%s) started at %s -> %s" % (self.action, self.params, time.ctime(self.started_at), self.response)
class _Beacon(threading.Thread):
"""Threaded beacon to: listen for adverts & broadcast adverts
"""
rpc_port = 9998
beacon_port = 9999
finder_timeout_s = 0.05
beacon_message_size = 256
time_between_broadcasts_s = config.BEACON_ADVERT_FREQUENCY_S
def __init__(self, beacon_port=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self._stop_event = threading.Event()
self._is_paused = False
self.beacon_port = beacon_port or self.__class__.beacon_port
_logger.debug("Using beacon port %s", self.beacon_port)
#
# Services we're advertising
#
self._services_to_advertise = collections.deque()
#
# Broadcast adverts which we've received (some of which will be our own)
#
self._services_found = {}
#
# _Command requests are collected on one queue
# _Command responses are added to another
#
self._command = None
#
# Set the socket up to broadcast datagrams over UDP
#
self.broadcast_addresses = set(core._find_ip4_broadcast_addresses())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.bind(("", self.beacon_port))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
self.socket_fd = self.socket.fileno()
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.rpc = sockets.context.socket(zmq.REP)
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
self.rpc.linger = 1000
_bind_with_timeout(self.rpc.bind, ("tcp://127.0.0.1:%s" % self.rpc_port,))
def stop(self):
_logger.debug("About to stop")
self._stop_event.set()
#
# Commands available via RPC are methods whose name starts with "do_"
#
def do_advertise(self, started_at, name, address, fail_if_exists, ttl_s):
_logger.debug("Advertise %s on %s %s TTL=%s", name, address, fail_if_exists, ttl_s)
canonical_address = core.address(address)
for service in self._services_to_advertise:
if service.name == name:
if fail_if_exists:
_logger.error("_Service %s already exists on %s", name, service.address)
return None
else:
_logger.warn("Superseding service %s which already exists on %s", name, service.address)
service = _Service(name, canonical_address, ttl_s)
self._services_to_advertise.append(service)
#
# As a shortcut, automatically "discover" any services we ourselves are advertising
#
self._services_found[name] = service
return canonical_address
def do_unadvertise(self, started_at, name):
_logger.debug("Unadvertise %s", name)
for service in self._services_to_advertise:
if service.name == name:
self._services_to_advertise.remove(service)
break
else:
_logger.warn("No advert found for %s", name)
_logger.debug("Services now: %s", self._services_to_advertise)
def do_pause(self, started_at):
_logger.debug("Pause")
self._is_paused = True
def do_resume(self, started_at):
_logger.debug("Resume")
self._is_paused = False
def do_discover(self, started_at, name, wait_for_s):
_logger.debug("Discover %s waiting for %s seconds", name, wait_for_s)
discovered = self._services_found.get(name)
#
# If we've got a match, return it. Otherwise:
# * If we're due to wait for ever, continue
# * If we're out of time return None
# * Otherwise we've still got time left: continue
#
if discovered:
return discovered.address
if timed_out(started_at, wait_for_s):
return None
else:
return Continue
def do_discover_all(self, started_at):
_logger.debug("Discover all")
return [(service.name, service.address) for service in self._services_found.values()]
def do_reset(self, started_at):
_logger.debug("Reset")
self.do_pause(started_at)
self._services_found.clear()
self._services_to_advertise.clear()
self.do_resume(started_at)
def do_stop(self, started_at):
_logger.debug("Stop")
self.stop()
def listen_for_one_advert(self):
events = dict(self.poller.poll(1000 * self.finder_timeout_s))
if self.socket_fd not in events:
return
message, source = self.socket.recvfrom(self.beacon_message_size)
_logger.debug("Broadcast message received: %r", message)
service_name, service_address, ttl_s = _unpack(message)
service = _Service(service_name, service_address, ttl_s)
self._services_found[service_name] = service
def broadcast_one_advert(self):
if self._services_to_advertise:
next_service = self._services_to_advertise[0]
if next_service.advertise_at < time.time():
_logger.debug("%s due to advertise at %s", next_service.name, time.ctime(next_service.advertise_at))
message = _pack([next_service.name, next_service.address, next_service.ttl_s])
for broadcast_address in self.broadcast_addresses:
_logger.debug("Advertising on %s", broadcast_address)
self.socket.sendto(message, 0, (broadcast_address, self.beacon_port))
next_service.advertise_at = time.time() + self.time_between_broadcasts_s
self._services_to_advertise.rotate(-1)
def remove_expired_adverts(self):
for name, service in list(self._services_found.items()):
#
# A service with an empty expiry time never expired
#
if service.expires_at is None:
continue
if service.expires_at <= time.time():
_logger.warn("Removing advert for %s which expired at %s",
name, time.ctime(service.expires_at))
del self._services_found[name]
def poll_command_request(self):
"""If the command RPC socket has an incoming request,
separate it into its action and its params and put it
on the command request queue.
"""
try:
message = self.rpc.recv(zmq.NOBLOCK)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return
else:
raise
_logger.debug("Received command %s", message)
segments = _unpack(message)
action, params = segments[0], segments[1:]
_logger.debug("Adding %s, %s to the request queue", action, params)
self._command = _Command(action, params)
def process_command(self):
if not self._command:
return
else:
_logger.debug("process_command: %s", self._command.action)
command = self._command
_logger.debug("Picked %s, %s, %s", self._command.action, self._command.params, self._command.started_at)
function = getattr(self, "do_" + command.action.lower(), None)
if not function:
raise NotImplementedError("%s is not a valid action")
else:
try:
result = function(command.started_at, *command.params)
except:
_logger.exception("Problem calling %s with %s", command.action, command.params)
result = None
_logger.debug("result = %s", result)
#
# result will be Continue if the action cannot be completed
# (eg a discovery) but its time is not yet expired. Leave
# the command on the stack for now.
#
if result is Continue:
return
#
# If we get a result, add the result to the response
# queue and pop the request off the stack.
#
self._command.response = result
def poll_command_reponse(self):
"""If the latest request has a response, issue it as a
reply to the RPC socket.
"""
if self._command.response is not Empty:
_logger.debug("Sending response %s", self._command.response)
self.rpc.send(_pack(self._command.response))
self._command = None
def run(self):
_logger.info("Starting discovery")
while not self._stop_event.wait(0):
try:
#
# If we're not already processing one, check for an command
# to advertise/discover from a local process.
#
if not self._command:
self.poll_command_request()
#
# If we're paused no adverts will be broadcast. Adverts
# will be received and stale ones expired
#
if not self._is_paused:
#
# Broadcast the first advert whose advertising schedule
# has arrived
#
self.broadcast_one_advert()
#
# See if an advert broadcast has arrived
#
self.listen_for_one_advert()
#
# See if any adverts have expired
#
self.remove_expired_adverts()
#
# If we're processing a command, see if it's complete
#
if self._command:
self.process_command()
self.poll_command_reponse()
except:
_logger.exception("Problem in beacon thread")
break
_logger.info("Ending discovery")
self.rpc.close()
self.socket.close()
_beacon = None
_remote_beacon = object()
def _start_beacon(port=None):
"""Start a beacon thread within this process if no beacon is currently
running on this machine.
In general this is called automatically when an attempt is made to
advertise or discover. It might be convenient, though, to call this
function directly if you want to have a process whose only job is
to host this beacon so that it doesn't shut down when other processes
shut down.
"""
global _beacon
if _beacon is None:
_logger.debug("About to start beacon with port %s", port)
try:
_beacon = _Beacon(port)
except (OSError, socket.error) as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("Beacon already active on this machine")
#
# _remote_beacon is simply a not-None sentinel value
# to distinguish between the case where we have not
# yet started a beacon and where we have found one
# in another process.
#
_beacon = _remote_beacon
else:
raise
else:
_beacon.start()
def _stop_beacon():
#
# Mostly for testing: shutdown the beacon if it's running
# locally and clear it globally so the next attempt will
# start fresh.
#
global _beacon
if _beacon and _beacon is not _remote_beacon:
_beacon.stop()
_beacon.join()
_beacon = None
def _rpc(action, *args, **kwargs):
_logger.debug("About to send rpc request %s with args %s, kwargs %s", action, args, kwargs)
wait_for_s = kwargs.pop("wait_for_s", 5)
with sockets.context.socket(zmq.REQ) as socket:
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
socket.connect("tcp://localhost:%s" % _Beacon.rpc_port)
socket.send(_pack([action] + list(args)))
reply = sockets._sockets._receive_with_timeout(socket, wait_for_s)
return _unpack(reply)
def _pause():
return _rpc("pause")
def _resume():
return _rpc("resume")
_services_advertised = {}
def advertise(name, address=None, fail_if_exists=False, ttl_s=config.ADVERT_TTL_S):
"""Advertise a name at an address
Start to advertise service `name` at address `address`. If
the address is not supplied, one is constructed and this is
returned by the function. ie this is a typical use::
address = nw0.advertise("myservice")
:param name: any text
:param address: either "ip:port" or None
:param fail_if_exists: fail if this name is already registered?
:param ttl_s: the advert will persist for this many seconds other beacons
:returns: the address given or constructed
"""
_start_beacon()
address = _rpc("advertise", name, address, fail_if_exists, ttl_s)
_services_advertised[name] = address
return address
def _unadvertise_all():
"""Remove all adverts
"""
for name in _services_advertised:
try:
_unadvertise(name)
except core.SocketTimedOutError:
_logger.warn("Timed out trying to unadvertise")
break
atexit.register(_unadvertise_all)
def _unadvertise(name):
"""Remove the advert for a name
This is intended for internal use only at the moment. When a process
exits it can remove adverts for its services from the beacon running
on that machine. (Of course, if the beacon thread is part of of the
same service, all its adverts will cease).
"""
_start_beacon()
return _rpc("unadvertise", name)
def discover(name, wait_for_s=60):
"""Discover a service by name
Look for an advert to a named service::
address = nw0.discover("myservice")
:param name: any text
:param wait_for_s: how many seconds to wait before giving up
:returns: the address found or None
"""
_start_beacon()
#
# It's possible to enter a deadlock situation where the first
# process fires off a discovery request and waits for the
# second process to advertise. But the second process has to
# connect to the rpc port of the first process' beacon and
# its advertisement is queued behind the pending discovery.
#
# To give both a chance of succeeding we operate in bursts,
# allowing them to interleave.
#
t0 = time.time()
while True:
discovery = _rpc("discover", name, 0.5)
if discovery:
return discovery
if timed_out(t0, wait_for_s):
return None
def discover_all():
"""Produce a list of all known services and their addresses
Ask for all known services as a list of 2-tuples: (name, address)
This could, eg, be used to form a dictionary of services::
services = dict(nw0.discover_all())
:returns: a list of 2-tuples [(name, address), ...]
"""
_start_beacon()
return _rpc("discover_all")
def discover_group(group, separator="/", exclude=None):
"""Produce a list of all services and their addresses in a group
A group is an optional form of namespace within the discovery mechanism.
If an advertised name has the form <group><sep><name> it is deemed to
belong to <group>. Note that the service's name is still the full
string <group><sep><name>. The group concept is simply for discovery and
to assist differentiation, eg, in a classroom group.
:param group: the name of a group prefix
:param separator: the separator character [/]
:param exclude: an iterable of names to exclude (or None)
:returns: a list of 2-tuples [(name, address), ...]
"""
_start_beacon()
if exclude is None:
names_to_exclude = set()
else:
names_to_exclude = set(exclude)
all_discovered = _rpc("discover_all")
return [(name, address)
for (name, address) in all_discovered
if name.startswith("%s%s" % (group, separator))
and name not in names_to_exclude
]
def reset_beacon():
"""Clear the adverts which the beacon is carrying
(This is mostly useful when testing, to get a fresh start)
"""
_start_beacon()
return _rpc("reset")
if __name__ == '__main__':
params = [arg.lower() for arg in sys.argv]
if "--debug" in params:
logging_level = logging.DEBUG
else:
logging_level = logging.INFO
handler = logging.StreamHandler()
handler.setLevel(logging_level)
handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
_logger.addHandler(handler)
_start_beacon()
_logger.info("Beacon started at %s", time.asctime())
while True:
time.sleep(1)
|
tjguk/networkzero | networkzero/discovery.py | advertise | python | def advertise(name, address=None, fail_if_exists=False, ttl_s=config.ADVERT_TTL_S):
_start_beacon()
address = _rpc("advertise", name, address, fail_if_exists, ttl_s)
_services_advertised[name] = address
return address | Advertise a name at an address
Start to advertise service `name` at address `address`. If
the address is not supplied, one is constructed and this is
returned by the function. ie this is a typical use::
address = nw0.advertise("myservice")
:param name: any text
:param address: either "ip:port" or None
:param fail_if_exists: fail if this name is already registered?
:param ttl_s: the advert will persist for this many seconds other beacons
:returns: the address given or constructed | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L497-L515 | [
"def _start_beacon(port=None):\n \"\"\"Start a beacon thread within this process if no beacon is currently\n running on this machine.\n\n In general this is called automatically when an attempt is made to\n advertise or discover. It might be convenient, though, to call this\n function directly if you... | # -*- coding: utf-8 -*-
"""Advertise and collect advertisements of network services
The discovery module offers:
* A UDP broadcast socket which:
- Listens for and keeps track of service adverts from this and other
machines & processes
- Broadcasts services advertised by this process
* A ZeroMQ socket which allow any process on this machine to
communicate with its broadcast socket
In other words, we have a beacon which listens to instructions
from processes on this machine while sending out and listening
to adverts broadcast to/from all machines on the network.
The beacon is started automatically in a daemon thread when the first
attempt is made to advertise or discover. If another process already
has a beacon running (ie if this beacon can't bind to its port) this
beacon thread will shut down with no further action.
The module-level functions to advertise and discover will open a connection
to a ZeroMQ socket on this machine (which might be hosted by this or by another
process) and will use this socket to send commands to the beacon thread which
will update or return its internal list of advertised services.
As an additional convenience, the :func:`advertise` function will, if given no
specific address, generate a suitable ip:port pair by interrogating the system.
This functionality is actually in :func:`networkzero.address` (qv).
"""
from __future__ import print_function
import os, sys
import atexit
import collections
import errno
import json
import logging
import socket
import threading
import time
import zmq
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
#
# Continue is a sentinel value to indicate that a command
# has completed its scheduled slice without producing a result
# and without exceeding its overall timeout.
#
Continue = object()
#
# Empty is a sentinel to distinguish between no result and a result of None
#
Empty = object()
def _unpack(message):
return json.loads(message.decode(config.ENCODING))
def _pack(message):
return json.dumps(message).encode(config.ENCODING)
def timed_out(started_at, wait_for_s):
#
# If the wait time is the sentinel value FOREVER, never time out
# Otherwise time out if the current time is more than wait_for_s seconds after the start time
#
if wait_for_s is config.FOREVER:
return False
else:
return time.time() > started_at + wait_for_s
def _bind_with_timeout(bind_function, args, n_tries=3, retry_interval_s=0.5):
"""Attempt to bind a socket a number of times with a short interval in between
Especially on Linux, crashing out of a networkzero process can leave the sockets
lingering and unable to re-bind on startup. We give it a few goes here to see if
we can bind within a couple of seconds.
"""
n_tries_left = n_tries
while n_tries_left > 0:
try:
return bind_function(*args)
except zmq.error.ZMQError as exc:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
except OSError as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
else:
raise
else:
raise core.SocketAlreadyExistsError("Failed to bind after %s tries" % n_tries)
class _Service(object):
"""Convenience container with details of a service to be advertised
Includes the name, address and when it is next due to be advertised
and when it is due to expire if it was discovered.
"""
def __init__(self, name, address, ttl_s=None):
self.name = name
self.address = address
self.ttl_s = ttl_s
self.expires_at = None if ttl_s is None else (time.time() + ttl_s)
self.advertise_at = 0
def __str__(self):
return "_Service %s at %s due to advertise at %s and expire at %s" % (
self.name, self.address,
time.ctime(self.advertise_at), time.ctime(self.expires_at)
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __eq__(self, other):
return self.name == other.name
class _Command(object):
"""Convenience container with details of a running command
Includes the action ("discover", "advertise" etc.), its parameters, when
it was started -- for timeout purposes -- and any response.
This is used by the process_command functionality
"""
def __init__(self, action, params):
self.action = action
self.params = params
self.started_at = time.time()
self.response = Empty
def __str__(self):
return "_Command: %s (%s) started at %s -> %s" % (self.action, self.params, time.ctime(self.started_at), self.response)
class _Beacon(threading.Thread):
"""Threaded beacon to: listen for adverts & broadcast adverts
"""
rpc_port = 9998
beacon_port = 9999
finder_timeout_s = 0.05
beacon_message_size = 256
time_between_broadcasts_s = config.BEACON_ADVERT_FREQUENCY_S
def __init__(self, beacon_port=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self._stop_event = threading.Event()
self._is_paused = False
self.beacon_port = beacon_port or self.__class__.beacon_port
_logger.debug("Using beacon port %s", self.beacon_port)
#
# Services we're advertising
#
self._services_to_advertise = collections.deque()
#
# Broadcast adverts which we've received (some of which will be our own)
#
self._services_found = {}
#
# _Command requests are collected on one queue
# _Command responses are added to another
#
self._command = None
#
# Set the socket up to broadcast datagrams over UDP
#
self.broadcast_addresses = set(core._find_ip4_broadcast_addresses())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.bind(("", self.beacon_port))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
self.socket_fd = self.socket.fileno()
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.rpc = sockets.context.socket(zmq.REP)
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
self.rpc.linger = 1000
_bind_with_timeout(self.rpc.bind, ("tcp://127.0.0.1:%s" % self.rpc_port,))
def stop(self):
_logger.debug("About to stop")
self._stop_event.set()
#
# Commands available via RPC are methods whose name starts with "do_"
#
def do_advertise(self, started_at, name, address, fail_if_exists, ttl_s):
_logger.debug("Advertise %s on %s %s TTL=%s", name, address, fail_if_exists, ttl_s)
canonical_address = core.address(address)
for service in self._services_to_advertise:
if service.name == name:
if fail_if_exists:
_logger.error("_Service %s already exists on %s", name, service.address)
return None
else:
_logger.warn("Superseding service %s which already exists on %s", name, service.address)
service = _Service(name, canonical_address, ttl_s)
self._services_to_advertise.append(service)
#
# As a shortcut, automatically "discover" any services we ourselves are advertising
#
self._services_found[name] = service
return canonical_address
def do_unadvertise(self, started_at, name):
_logger.debug("Unadvertise %s", name)
for service in self._services_to_advertise:
if service.name == name:
self._services_to_advertise.remove(service)
break
else:
_logger.warn("No advert found for %s", name)
_logger.debug("Services now: %s", self._services_to_advertise)
def do_pause(self, started_at):
_logger.debug("Pause")
self._is_paused = True
def do_resume(self, started_at):
_logger.debug("Resume")
self._is_paused = False
def do_discover(self, started_at, name, wait_for_s):
_logger.debug("Discover %s waiting for %s seconds", name, wait_for_s)
discovered = self._services_found.get(name)
#
# If we've got a match, return it. Otherwise:
# * If we're due to wait for ever, continue
# * If we're out of time return None
# * Otherwise we've still got time left: continue
#
if discovered:
return discovered.address
if timed_out(started_at, wait_for_s):
return None
else:
return Continue
def do_discover_all(self, started_at):
_logger.debug("Discover all")
return [(service.name, service.address) for service in self._services_found.values()]
def do_reset(self, started_at):
_logger.debug("Reset")
self.do_pause(started_at)
self._services_found.clear()
self._services_to_advertise.clear()
self.do_resume(started_at)
def do_stop(self, started_at):
_logger.debug("Stop")
self.stop()
def listen_for_one_advert(self):
events = dict(self.poller.poll(1000 * self.finder_timeout_s))
if self.socket_fd not in events:
return
message, source = self.socket.recvfrom(self.beacon_message_size)
_logger.debug("Broadcast message received: %r", message)
service_name, service_address, ttl_s = _unpack(message)
service = _Service(service_name, service_address, ttl_s)
self._services_found[service_name] = service
def broadcast_one_advert(self):
if self._services_to_advertise:
next_service = self._services_to_advertise[0]
if next_service.advertise_at < time.time():
_logger.debug("%s due to advertise at %s", next_service.name, time.ctime(next_service.advertise_at))
message = _pack([next_service.name, next_service.address, next_service.ttl_s])
for broadcast_address in self.broadcast_addresses:
_logger.debug("Advertising on %s", broadcast_address)
self.socket.sendto(message, 0, (broadcast_address, self.beacon_port))
next_service.advertise_at = time.time() + self.time_between_broadcasts_s
self._services_to_advertise.rotate(-1)
def remove_expired_adverts(self):
for name, service in list(self._services_found.items()):
#
# A service with an empty expiry time never expired
#
if service.expires_at is None:
continue
if service.expires_at <= time.time():
_logger.warn("Removing advert for %s which expired at %s",
name, time.ctime(service.expires_at))
del self._services_found[name]
def poll_command_request(self):
"""If the command RPC socket has an incoming request,
separate it into its action and its params and put it
on the command request queue.
"""
try:
message = self.rpc.recv(zmq.NOBLOCK)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return
else:
raise
_logger.debug("Received command %s", message)
segments = _unpack(message)
action, params = segments[0], segments[1:]
_logger.debug("Adding %s, %s to the request queue", action, params)
self._command = _Command(action, params)
def process_command(self):
if not self._command:
return
else:
_logger.debug("process_command: %s", self._command.action)
command = self._command
_logger.debug("Picked %s, %s, %s", self._command.action, self._command.params, self._command.started_at)
function = getattr(self, "do_" + command.action.lower(), None)
if not function:
raise NotImplementedError("%s is not a valid action")
else:
try:
result = function(command.started_at, *command.params)
except:
_logger.exception("Problem calling %s with %s", command.action, command.params)
result = None
_logger.debug("result = %s", result)
#
# result will be Continue if the action cannot be completed
# (eg a discovery) but its time is not yet expired. Leave
# the command on the stack for now.
#
if result is Continue:
return
#
# If we get a result, add the result to the response
# queue and pop the request off the stack.
#
self._command.response = result
def poll_command_reponse(self):
"""If the latest request has a response, issue it as a
reply to the RPC socket.
"""
if self._command.response is not Empty:
_logger.debug("Sending response %s", self._command.response)
self.rpc.send(_pack(self._command.response))
self._command = None
def run(self):
_logger.info("Starting discovery")
while not self._stop_event.wait(0):
try:
#
# If we're not already processing one, check for an command
# to advertise/discover from a local process.
#
if not self._command:
self.poll_command_request()
#
# If we're paused no adverts will be broadcast. Adverts
# will be received and stale ones expired
#
if not self._is_paused:
#
# Broadcast the first advert whose advertising schedule
# has arrived
#
self.broadcast_one_advert()
#
# See if an advert broadcast has arrived
#
self.listen_for_one_advert()
#
# See if any adverts have expired
#
self.remove_expired_adverts()
#
# If we're processing a command, see if it's complete
#
if self._command:
self.process_command()
self.poll_command_reponse()
except:
_logger.exception("Problem in beacon thread")
break
_logger.info("Ending discovery")
self.rpc.close()
self.socket.close()
_beacon = None
_remote_beacon = object()
def _start_beacon(port=None):
"""Start a beacon thread within this process if no beacon is currently
running on this machine.
In general this is called automatically when an attempt is made to
advertise or discover. It might be convenient, though, to call this
function directly if you want to have a process whose only job is
to host this beacon so that it doesn't shut down when other processes
shut down.
"""
global _beacon
if _beacon is None:
_logger.debug("About to start beacon with port %s", port)
try:
_beacon = _Beacon(port)
except (OSError, socket.error) as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("Beacon already active on this machine")
#
# _remote_beacon is simply a not-None sentinel value
# to distinguish between the case where we have not
# yet started a beacon and where we have found one
# in another process.
#
_beacon = _remote_beacon
else:
raise
else:
_beacon.start()
def _stop_beacon():
#
# Mostly for testing: shutdown the beacon if it's running
# locally and clear it globally so the next attempt will
# start fresh.
#
global _beacon
if _beacon and _beacon is not _remote_beacon:
_beacon.stop()
_beacon.join()
_beacon = None
def _rpc(action, *args, **kwargs):
_logger.debug("About to send rpc request %s with args %s, kwargs %s", action, args, kwargs)
wait_for_s = kwargs.pop("wait_for_s", 5)
with sockets.context.socket(zmq.REQ) as socket:
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
socket.connect("tcp://localhost:%s" % _Beacon.rpc_port)
socket.send(_pack([action] + list(args)))
reply = sockets._sockets._receive_with_timeout(socket, wait_for_s)
return _unpack(reply)
def _pause():
return _rpc("pause")
def _resume():
return _rpc("resume")
_services_advertised = {}
def advertise(name, address=None, fail_if_exists=False, ttl_s=config.ADVERT_TTL_S):
"""Advertise a name at an address
Start to advertise service `name` at address `address`. If
the address is not supplied, one is constructed and this is
returned by the function. ie this is a typical use::
address = nw0.advertise("myservice")
:param name: any text
:param address: either "ip:port" or None
:param fail_if_exists: fail if this name is already registered?
:param ttl_s: the advert will persist for this many seconds other beacons
:returns: the address given or constructed
"""
_start_beacon()
address = _rpc("advertise", name, address, fail_if_exists, ttl_s)
_services_advertised[name] = address
return address
def _unadvertise_all():
"""Remove all adverts
"""
for name in _services_advertised:
try:
_unadvertise(name)
except core.SocketTimedOutError:
_logger.warn("Timed out trying to unadvertise")
break
atexit.register(_unadvertise_all)
def _unadvertise(name):
"""Remove the advert for a name
This is intended for internal use only at the moment. When a process
exits it can remove adverts for its services from the beacon running
on that machine. (Of course, if the beacon thread is part of of the
same service, all its adverts will cease).
"""
_start_beacon()
return _rpc("unadvertise", name)
def discover(name, wait_for_s=60):
"""Discover a service by name
Look for an advert to a named service::
address = nw0.discover("myservice")
:param name: any text
:param wait_for_s: how many seconds to wait before giving up
:returns: the address found or None
"""
_start_beacon()
#
# It's possible to enter a deadlock situation where the first
# process fires off a discovery request and waits for the
# second process to advertise. But the second process has to
# connect to the rpc port of the first process' beacon and
# its advertisement is queued behind the pending discovery.
#
# To give both a chance of succeeding we operate in bursts,
# allowing them to interleave.
#
t0 = time.time()
while True:
discovery = _rpc("discover", name, 0.5)
if discovery:
return discovery
if timed_out(t0, wait_for_s):
return None
def discover_all():
"""Produce a list of all known services and their addresses
Ask for all known services as a list of 2-tuples: (name, address)
This could, eg, be used to form a dictionary of services::
services = dict(nw0.discover_all())
:returns: a list of 2-tuples [(name, address), ...]
"""
_start_beacon()
return _rpc("discover_all")
def discover_group(group, separator="/", exclude=None):
"""Produce a list of all services and their addresses in a group
A group is an optional form of namespace within the discovery mechanism.
If an advertised name has the form <group><sep><name> it is deemed to
belong to <group>. Note that the service's name is still the full
string <group><sep><name>. The group concept is simply for discovery and
to assist differentiation, eg, in a classroom group.
:param group: the name of a group prefix
:param separator: the separator character [/]
:param exclude: an iterable of names to exclude (or None)
:returns: a list of 2-tuples [(name, address), ...]
"""
_start_beacon()
if exclude is None:
names_to_exclude = set()
else:
names_to_exclude = set(exclude)
all_discovered = _rpc("discover_all")
return [(name, address)
for (name, address) in all_discovered
if name.startswith("%s%s" % (group, separator))
and name not in names_to_exclude
]
def reset_beacon():
"""Clear the adverts which the beacon is carrying
(This is mostly useful when testing, to get a fresh start)
"""
_start_beacon()
return _rpc("reset")
if __name__ == '__main__':
params = [arg.lower() for arg in sys.argv]
if "--debug" in params:
logging_level = logging.DEBUG
else:
logging_level = logging.INFO
handler = logging.StreamHandler()
handler.setLevel(logging_level)
handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
_logger.addHandler(handler)
_start_beacon()
_logger.info("Beacon started at %s", time.asctime())
while True:
time.sleep(1)
|
tjguk/networkzero | networkzero/discovery.py | discover | python | def discover(name, wait_for_s=60):
_start_beacon()
#
# It's possible to enter a deadlock situation where the first
# process fires off a discovery request and waits for the
# second process to advertise. But the second process has to
# connect to the rpc port of the first process' beacon and
# its advertisement is queued behind the pending discovery.
#
# To give both a chance of succeeding we operate in bursts,
# allowing them to interleave.
#
t0 = time.time()
while True:
discovery = _rpc("discover", name, 0.5)
if discovery:
return discovery
if timed_out(t0, wait_for_s):
return None | Discover a service by name
Look for an advert to a named service::
address = nw0.discover("myservice")
:param name: any text
:param wait_for_s: how many seconds to wait before giving up
:returns: the address found or None | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L539-L567 | [
"def timed_out(started_at, wait_for_s):\n #\n # If the wait time is the sentinel value FOREVER, never time out\n # Otherwise time out if the current time is more than wait_for_s seconds after the start time\n #\n if wait_for_s is config.FOREVER:\n return False\n else:\n return time.t... | # -*- coding: utf-8 -*-
"""Advertise and collect advertisements of network services
The discovery module offers:
* A UDP broadcast socket which:
- Listens for and keeps track of service adverts from this and other
machines & processes
- Broadcasts services advertised by this process
* A ZeroMQ socket which allow any process on this machine to
communicate with its broadcast socket
In other words, we have a beacon which listens to instructions
from processes on this machine while sending out and listening
to adverts broadcast to/from all machines on the network.
The beacon is started automatically in a daemon thread when the first
attempt is made to advertise or discover. If another process already
has a beacon running (ie if this beacon can't bind to its port) this
beacon thread will shut down with no further action.
The module-level functions to advertise and discover will open a connection
to a ZeroMQ socket on this machine (which might be hosted by this or by another
process) and will use this socket to send commands to the beacon thread which
will update or return its internal list of advertised services.
As an additional convenience, the :func:`advertise` function will, if given no
specific address, generate a suitable ip:port pair by interrogating the system.
This functionality is actually in :func:`networkzero.address` (qv).
"""
from __future__ import print_function
import os, sys
import atexit
import collections
import errno
import json
import logging
import socket
import threading
import time
import zmq
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
#
# Continue is a sentinel value to indicate that a command
# has completed its scheduled slice without producing a result
# and without exceeding its overall timeout.
#
Continue = object()
#
# Empty is a sentinel to distinguish between no result and a result of None
#
Empty = object()
def _unpack(message):
return json.loads(message.decode(config.ENCODING))
def _pack(message):
return json.dumps(message).encode(config.ENCODING)
def timed_out(started_at, wait_for_s):
#
# If the wait time is the sentinel value FOREVER, never time out
# Otherwise time out if the current time is more than wait_for_s seconds after the start time
#
if wait_for_s is config.FOREVER:
return False
else:
return time.time() > started_at + wait_for_s
def _bind_with_timeout(bind_function, args, n_tries=3, retry_interval_s=0.5):
"""Attempt to bind a socket a number of times with a short interval in between
Especially on Linux, crashing out of a networkzero process can leave the sockets
lingering and unable to re-bind on startup. We give it a few goes here to see if
we can bind within a couple of seconds.
"""
n_tries_left = n_tries
while n_tries_left > 0:
try:
return bind_function(*args)
except zmq.error.ZMQError as exc:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
except OSError as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
else:
raise
else:
raise core.SocketAlreadyExistsError("Failed to bind after %s tries" % n_tries)
class _Service(object):
"""Convenience container with details of a service to be advertised
Includes the name, address and when it is next due to be advertised
and when it is due to expire if it was discovered.
"""
def __init__(self, name, address, ttl_s=None):
self.name = name
self.address = address
self.ttl_s = ttl_s
self.expires_at = None if ttl_s is None else (time.time() + ttl_s)
self.advertise_at = 0
def __str__(self):
return "_Service %s at %s due to advertise at %s and expire at %s" % (
self.name, self.address,
time.ctime(self.advertise_at), time.ctime(self.expires_at)
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __eq__(self, other):
return self.name == other.name
class _Command(object):
"""Convenience container with details of a running command
Includes the action ("discover", "advertise" etc.), its parameters, when
it was started -- for timeout purposes -- and any response.
This is used by the process_command functionality
"""
def __init__(self, action, params):
self.action = action
self.params = params
self.started_at = time.time()
self.response = Empty
def __str__(self):
return "_Command: %s (%s) started at %s -> %s" % (self.action, self.params, time.ctime(self.started_at), self.response)
class _Beacon(threading.Thread):
"""Threaded beacon to: listen for adverts & broadcast adverts
"""
rpc_port = 9998
beacon_port = 9999
finder_timeout_s = 0.05
beacon_message_size = 256
time_between_broadcasts_s = config.BEACON_ADVERT_FREQUENCY_S
def __init__(self, beacon_port=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self._stop_event = threading.Event()
self._is_paused = False
self.beacon_port = beacon_port or self.__class__.beacon_port
_logger.debug("Using beacon port %s", self.beacon_port)
#
# Services we're advertising
#
self._services_to_advertise = collections.deque()
#
# Broadcast adverts which we've received (some of which will be our own)
#
self._services_found = {}
#
# _Command requests are collected on one queue
# _Command responses are added to another
#
self._command = None
#
# Set the socket up to broadcast datagrams over UDP
#
self.broadcast_addresses = set(core._find_ip4_broadcast_addresses())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.bind(("", self.beacon_port))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
self.socket_fd = self.socket.fileno()
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.rpc = sockets.context.socket(zmq.REP)
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
self.rpc.linger = 1000
_bind_with_timeout(self.rpc.bind, ("tcp://127.0.0.1:%s" % self.rpc_port,))
def stop(self):
_logger.debug("About to stop")
self._stop_event.set()
#
# Commands available via RPC are methods whose name starts with "do_"
#
def do_advertise(self, started_at, name, address, fail_if_exists, ttl_s):
_logger.debug("Advertise %s on %s %s TTL=%s", name, address, fail_if_exists, ttl_s)
canonical_address = core.address(address)
for service in self._services_to_advertise:
if service.name == name:
if fail_if_exists:
_logger.error("_Service %s already exists on %s", name, service.address)
return None
else:
_logger.warn("Superseding service %s which already exists on %s", name, service.address)
service = _Service(name, canonical_address, ttl_s)
self._services_to_advertise.append(service)
#
# As a shortcut, automatically "discover" any services we ourselves are advertising
#
self._services_found[name] = service
return canonical_address
def do_unadvertise(self, started_at, name):
_logger.debug("Unadvertise %s", name)
for service in self._services_to_advertise:
if service.name == name:
self._services_to_advertise.remove(service)
break
else:
_logger.warn("No advert found for %s", name)
_logger.debug("Services now: %s", self._services_to_advertise)
def do_pause(self, started_at):
_logger.debug("Pause")
self._is_paused = True
def do_resume(self, started_at):
_logger.debug("Resume")
self._is_paused = False
def do_discover(self, started_at, name, wait_for_s):
_logger.debug("Discover %s waiting for %s seconds", name, wait_for_s)
discovered = self._services_found.get(name)
#
# If we've got a match, return it. Otherwise:
# * If we're due to wait for ever, continue
# * If we're out of time return None
# * Otherwise we've still got time left: continue
#
if discovered:
return discovered.address
if timed_out(started_at, wait_for_s):
return None
else:
return Continue
def do_discover_all(self, started_at):
_logger.debug("Discover all")
return [(service.name, service.address) for service in self._services_found.values()]
def do_reset(self, started_at):
_logger.debug("Reset")
self.do_pause(started_at)
self._services_found.clear()
self._services_to_advertise.clear()
self.do_resume(started_at)
def do_stop(self, started_at):
_logger.debug("Stop")
self.stop()
def listen_for_one_advert(self):
events = dict(self.poller.poll(1000 * self.finder_timeout_s))
if self.socket_fd not in events:
return
message, source = self.socket.recvfrom(self.beacon_message_size)
_logger.debug("Broadcast message received: %r", message)
service_name, service_address, ttl_s = _unpack(message)
service = _Service(service_name, service_address, ttl_s)
self._services_found[service_name] = service
def broadcast_one_advert(self):
if self._services_to_advertise:
next_service = self._services_to_advertise[0]
if next_service.advertise_at < time.time():
_logger.debug("%s due to advertise at %s", next_service.name, time.ctime(next_service.advertise_at))
message = _pack([next_service.name, next_service.address, next_service.ttl_s])
for broadcast_address in self.broadcast_addresses:
_logger.debug("Advertising on %s", broadcast_address)
self.socket.sendto(message, 0, (broadcast_address, self.beacon_port))
next_service.advertise_at = time.time() + self.time_between_broadcasts_s
self._services_to_advertise.rotate(-1)
def remove_expired_adverts(self):
for name, service in list(self._services_found.items()):
#
# A service with an empty expiry time never expired
#
if service.expires_at is None:
continue
if service.expires_at <= time.time():
_logger.warn("Removing advert for %s which expired at %s",
name, time.ctime(service.expires_at))
del self._services_found[name]
def poll_command_request(self):
"""If the command RPC socket has an incoming request,
separate it into its action and its params and put it
on the command request queue.
"""
try:
message = self.rpc.recv(zmq.NOBLOCK)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return
else:
raise
_logger.debug("Received command %s", message)
segments = _unpack(message)
action, params = segments[0], segments[1:]
_logger.debug("Adding %s, %s to the request queue", action, params)
self._command = _Command(action, params)
def process_command(self):
if not self._command:
return
else:
_logger.debug("process_command: %s", self._command.action)
command = self._command
_logger.debug("Picked %s, %s, %s", self._command.action, self._command.params, self._command.started_at)
function = getattr(self, "do_" + command.action.lower(), None)
if not function:
raise NotImplementedError("%s is not a valid action")
else:
try:
result = function(command.started_at, *command.params)
except:
_logger.exception("Problem calling %s with %s", command.action, command.params)
result = None
_logger.debug("result = %s", result)
#
# result will be Continue if the action cannot be completed
# (eg a discovery) but its time is not yet expired. Leave
# the command on the stack for now.
#
if result is Continue:
return
#
# If we get a result, add the result to the response
# queue and pop the request off the stack.
#
self._command.response = result
def poll_command_reponse(self):
"""If the latest request has a response, issue it as a
reply to the RPC socket.
"""
if self._command.response is not Empty:
_logger.debug("Sending response %s", self._command.response)
self.rpc.send(_pack(self._command.response))
self._command = None
def run(self):
_logger.info("Starting discovery")
while not self._stop_event.wait(0):
try:
#
# If we're not already processing one, check for an command
# to advertise/discover from a local process.
#
if not self._command:
self.poll_command_request()
#
# If we're paused no adverts will be broadcast. Adverts
# will be received and stale ones expired
#
if not self._is_paused:
#
# Broadcast the first advert whose advertising schedule
# has arrived
#
self.broadcast_one_advert()
#
# See if an advert broadcast has arrived
#
self.listen_for_one_advert()
#
# See if any adverts have expired
#
self.remove_expired_adverts()
#
# If we're processing a command, see if it's complete
#
if self._command:
self.process_command()
self.poll_command_reponse()
except:
_logger.exception("Problem in beacon thread")
break
_logger.info("Ending discovery")
self.rpc.close()
self.socket.close()
_beacon = None
_remote_beacon = object()
def _start_beacon(port=None):
"""Start a beacon thread within this process if no beacon is currently
running on this machine.
In general this is called automatically when an attempt is made to
advertise or discover. It might be convenient, though, to call this
function directly if you want to have a process whose only job is
to host this beacon so that it doesn't shut down when other processes
shut down.
"""
global _beacon
if _beacon is None:
_logger.debug("About to start beacon with port %s", port)
try:
_beacon = _Beacon(port)
except (OSError, socket.error) as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("Beacon already active on this machine")
#
# _remote_beacon is simply a not-None sentinel value
# to distinguish between the case where we have not
# yet started a beacon and where we have found one
# in another process.
#
_beacon = _remote_beacon
else:
raise
else:
_beacon.start()
def _stop_beacon():
    """Shut down a locally-hosted beacon, if there is one.

    Mostly for testing: the global is cleared so the next attempt to
    advertise or discover will start a fresh beacon.  A beacon hosted
    by another process (the _remote_beacon sentinel) is left alone.
    """
    global _beacon
    local_beacon = _beacon
    if local_beacon and local_beacon is not _remote_beacon:
        local_beacon.stop()
        local_beacon.join()
    _beacon = None
def _rpc(action, *args, **kwargs):
    """Send one request to the beacon's RPC socket and return the reply.

    :param action: command name understood by the beacon
        ("advertise", "discover", "pause", ...)
    :param args: positional parameters packed after the action
    :param wait_for_s: (keyword-only) seconds to wait for a reply [5]
    :returns: the unpacked reply object
    """
    _logger.debug("About to send rpc request %s with args %s, kwargs %s", action, args, kwargs)
    wait_for_s = kwargs.pop("wait_for_s", 5)
    #
    # Name the local "rpc_socket" rather than "socket" so the stdlib
    # socket module imported at file level is not shadowed here.
    #
    with sockets.context.socket(zmq.REQ) as rpc_socket:
        rpc_socket.connect("tcp://localhost:%s" % _Beacon.rpc_port)
        rpc_socket.send(_pack([action] + list(args)))
        reply = sockets._sockets._receive_with_timeout(rpc_socket, wait_for_s)
    return _unpack(reply)
def _pause():
    """Ask the beacon to stop broadcasting adverts (reception continues)."""
    return _rpc("pause")

def _resume():
    """Ask the beacon to resume broadcasting adverts."""
    return _rpc("resume")

# Adverts placed by this process, mapping name -> address; consulted by
# _unadvertise_all at interpreter exit.
_services_advertised = {}
def advertise(name, address=None, fail_if_exists=False, ttl_s=config.ADVERT_TTL_S):
    """Advertise a name at an address

    Start to advertise service `name` at address `address`. If
    the address is not supplied, one is constructed and this is
    returned by the function. ie this is a typical use::

        address = nw0.advertise("myservice")

    :param name: any text
    :param address: either "ip:port" or None
    :param fail_if_exists: fail if this name is already registered?
    :param ttl_s: the advert will persist for this many seconds in
        other beacons' caches before expiring
    :returns: the address given or constructed
    """
    _start_beacon()
    address = _rpc("advertise", name, address, fail_if_exists, ttl_s)
    # Remember what we advertised so _unadvertise_all can clean up at exit.
    _services_advertised[name] = address
    return address
def _unadvertise_all():
    """Remove all adverts placed by this process.

    Best-effort only: if the beacon has already gone away, stop at the
    first timeout rather than waiting once per advert.
    """
    for name in _services_advertised:
        try:
            _unadvertise(name)
        except core.SocketTimedOutError:
            _logger.warn("Timed out trying to unadvertise")
            break

# Clean up this process's adverts when the interpreter exits.
atexit.register(_unadvertise_all)
def _unadvertise(name):
    """Remove the advert for a name

    This is intended for internal use only at the moment. When a process
    exits it can remove adverts for its services from the beacon running
    on that machine. (Of course, if the beacon thread is part of the
    same service, all its adverts will cease).
    """
    _start_beacon()
    return _rpc("unadvertise", name)
def discover(name, wait_for_s=60):
    """Discover a service by name

    Look for an advert to a named service::

        address = nw0.discover("myservice")

    :param name: any text
    :param wait_for_s: how many seconds to wait before giving up
    :returns: the address found or None
    """
    _start_beacon()
    #
    # Deadlock avoidance: our discovery request could be queued on the
    # beacon's RPC socket ahead of the very advertisement we're waiting
    # for from another process.  Querying in short bursts lets the two
    # requests interleave.
    #
    started_at = time.time()
    address = None
    while not address:
        address = _rpc("discover", name, 0.5)
        if not address and timed_out(started_at, wait_for_s):
            return None
    return address
def discover_all():
    """Produce a list of all known services and their addresses

    Ask for all known services as a list of 2-tuples: (name, address)

    This could, eg, be used to form a dictionary of services::

        services = dict(nw0.discover_all())

    :returns: a list of 2-tuples [(name, address), ...]
    """
    # Ensure there is a beacon to query before issuing the RPC.
    _start_beacon()
    return _rpc("discover_all")
def discover_group(group, separator="/", exclude=None):
    """Produce a list of all services and their addresses in a group

    A group is an optional form of namespace within the discovery
    mechanism: an advertised name of the form <group><sep><name> is
    deemed to belong to <group>.  Note that the service's name is still
    the full string <group><sep><name>; the group concept is purely for
    discovery and differentiation, eg in a classroom.

    :param group: the name of a group prefix
    :param separator: the separator character [/]
    :param exclude: an iterable of names to exclude (or None)
    :returns: a list of 2-tuples [(name, address), ...]
    """
    _start_beacon()
    excluded = set() if exclude is None else set(exclude)
    prefix = "%s%s" % (group, separator)
    matches = []
    for name, address in _rpc("discover_all"):
        if name.startswith(prefix) and name not in excluded:
            matches.append((name, address))
    return matches
def reset_beacon():
    """Clear the adverts which the beacon is carrying

    (This is mostly useful when testing, to get a fresh start)
    """
    # Ensure there is a beacon to talk to before issuing the reset.
    _start_beacon()
    return _rpc("reset")
if __name__ == '__main__':
    # Run as a standalone beacon host; pass --debug for verbose logging.
    params = [arg.lower() for arg in sys.argv]
    if "--debug" in params:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    handler = logging.StreamHandler()
    handler.setLevel(logging_level)
    handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
    _logger.addHandler(handler)
    _start_beacon()
    _logger.info("Beacon started at %s", time.asctime())
    # Keep the main thread alive: the daemon beacon thread does the work.
    while True:
        time.sleep(1)
|
tjguk/networkzero | networkzero/discovery.py | discover_group | python | def discover_group(group, separator="/", exclude=None):
_start_beacon()
if exclude is None:
names_to_exclude = set()
else:
names_to_exclude = set(exclude)
all_discovered = _rpc("discover_all")
return [(name, address)
for (name, address) in all_discovered
if name.startswith("%s%s" % (group, separator))
and name not in names_to_exclude
] | Produce a list of all services and their addresses in a group
A group is an optional form of namespace within the discovery mechanism.
If an advertised name has the form <group><sep><name> it is deemed to
belong to <group>. Note that the service's name is still the full
string <group><sep><name>. The group concept is simply for discovery and
to assist differentiation, eg, in a classroom group.
:param group: the name of a group prefix
:param separator: the separator character [/]
:param exclude: an iterable of names to exclude (or None)
:returns: a list of 2-tuples [(name, address), ...] | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L582-L607 | [
"def _start_beacon(port=None):\n \"\"\"Start a beacon thread within this process if no beacon is currently\n running on this machine.\n\n In general this is called automatically when an attempt is made to\n advertise or discover. It might be convenient, though, to call this\n function directly if you... | # -*- coding: utf-8 -*-
"""Advertise and collect advertisements of network services
The discovery module offers:
* A UDP broadcast socket which:
- Listens for and keeps track of service adverts from this and other
machines & processes
- Broadcasts services advertised by this process
* A ZeroMQ socket which allow any process on this machine to
communicate with its broadcast socket
In other words, we have a beacon which listens to instructions
from processes on this machine while sending out and listening
to adverts broadcast to/from all machines on the network.
The beacon is started automatically in a daemon thread when the first
attempt is made to advertise or discover. If another process already
has a beacon running (ie if this beacon can't bind to its port) this
beacon thread will shut down with no further action.
The module-level functions to advertise and discover will open a connection
to a ZeroMQ socket on this machine (which might be hosted by this or by another
process) and will use this socket to send commands to the beacon thread which
will update or return its internal list of advertised services.
As an additional convenience, the :func:`advertise` function will, if given no
specific address, generate a suitable ip:port pair by interrogating the system.
This functionality is actually in :func:`networkzero.address` (qv).
"""
from __future__ import print_function
import os, sys
import atexit
import collections
import errno
import json
import logging
import socket
import threading
import time
import zmq
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
#
# Continue is a sentinel value to indicate that a command
# has completed its scheduled slice without producing a result
# and without exceeding its overall timeout.
#
Continue = object()
#
# Empty is a sentinel to distinguish between no result and a result of None
#
Empty = object()
def _unpack(message):
    """Decode a wire message (bytes) back into a Python object via JSON."""
    return json.loads(message.decode(config.ENCODING))
def _pack(message):
    """Encode a Python object as JSON bytes for the wire."""
    return json.dumps(message).encode(config.ENCODING)
def timed_out(started_at, wait_for_s):
    """Return True if more than wait_for_s seconds have passed since started_at.

    The sentinel value config.FOREVER never times out.
    """
    return (
        wait_for_s is not config.FOREVER
        and time.time() > started_at + wait_for_s
    )
def _bind_with_timeout(bind_function, args, n_tries=3, retry_interval_s=0.5):
"""Attempt to bind a socket a number of times with a short interval in between
Especially on Linux, crashing out of a networkzero process can leave the sockets
lingering and unable to re-bind on startup. We give it a few goes here to see if
we can bind within a couple of seconds.
"""
n_tries_left = n_tries
while n_tries_left > 0:
try:
return bind_function(*args)
except zmq.error.ZMQError as exc:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
except OSError as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("%s; %d tries remaining", exc, n_tries_left)
n_tries_left -= 1
else:
raise
else:
raise core.SocketAlreadyExistsError("Failed to bind after %s tries" % n_tries)
class _Service(object):
"""Convenience container with details of a service to be advertised
Includes the name, address and when it is next due to be advertised
and when it is due to expire if it was discovered.
"""
def __init__(self, name, address, ttl_s=None):
self.name = name
self.address = address
self.ttl_s = ttl_s
self.expires_at = None if ttl_s is None else (time.time() + ttl_s)
self.advertise_at = 0
def __str__(self):
return "_Service %s at %s due to advertise at %s and expire at %s" % (
self.name, self.address,
time.ctime(self.advertise_at), time.ctime(self.expires_at)
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __eq__(self, other):
return self.name == other.name
class _Command(object):
    """Convenience container with details of a running command

    Records the action ("discover", "advertise" etc.), its parameters,
    when it was started (for timeout purposes) and any response so far.
    Used by the beacon's process_command machinery.
    """

    def __init__(self, action, params):
        self.action = action
        self.params = params
        self.started_at = time.time()
        # Empty is a sentinel distinguishing "no result yet" from None.
        self.response = Empty

    def __str__(self):
        return "_Command: %s (%s) started at %s -> %s" % (
            self.action,
            self.params,
            time.ctime(self.started_at),
            self.response,
        )
class _Beacon(threading.Thread):
"""Threaded beacon to: listen for adverts & broadcast adverts
"""
rpc_port = 9998
beacon_port = 9999
finder_timeout_s = 0.05
beacon_message_size = 256
time_between_broadcasts_s = config.BEACON_ADVERT_FREQUENCY_S
def __init__(self, beacon_port=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self._stop_event = threading.Event()
self._is_paused = False
self.beacon_port = beacon_port or self.__class__.beacon_port
_logger.debug("Using beacon port %s", self.beacon_port)
#
# Services we're advertising
#
self._services_to_advertise = collections.deque()
#
# Broadcast adverts which we've received (some of which will be our own)
#
self._services_found = {}
#
# _Command requests are collected on one queue
# _Command responses are added to another
#
self._command = None
#
# Set the socket up to broadcast datagrams over UDP
#
self.broadcast_addresses = set(core._find_ip4_broadcast_addresses())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.bind(("", self.beacon_port))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
self.socket_fd = self.socket.fileno()
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.rpc = sockets.context.socket(zmq.REP)
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
self.rpc.linger = 1000
_bind_with_timeout(self.rpc.bind, ("tcp://127.0.0.1:%s" % self.rpc_port,))
def stop(self):
_logger.debug("About to stop")
self._stop_event.set()
#
# Commands available via RPC are methods whose name starts with "do_"
#
def do_advertise(self, started_at, name, address, fail_if_exists, ttl_s):
_logger.debug("Advertise %s on %s %s TTL=%s", name, address, fail_if_exists, ttl_s)
canonical_address = core.address(address)
for service in self._services_to_advertise:
if service.name == name:
if fail_if_exists:
_logger.error("_Service %s already exists on %s", name, service.address)
return None
else:
_logger.warn("Superseding service %s which already exists on %s", name, service.address)
service = _Service(name, canonical_address, ttl_s)
self._services_to_advertise.append(service)
#
# As a shortcut, automatically "discover" any services we ourselves are advertising
#
self._services_found[name] = service
return canonical_address
def do_unadvertise(self, started_at, name):
_logger.debug("Unadvertise %s", name)
for service in self._services_to_advertise:
if service.name == name:
self._services_to_advertise.remove(service)
break
else:
_logger.warn("No advert found for %s", name)
_logger.debug("Services now: %s", self._services_to_advertise)
def do_pause(self, started_at):
_logger.debug("Pause")
self._is_paused = True
def do_resume(self, started_at):
_logger.debug("Resume")
self._is_paused = False
def do_discover(self, started_at, name, wait_for_s):
_logger.debug("Discover %s waiting for %s seconds", name, wait_for_s)
discovered = self._services_found.get(name)
#
# If we've got a match, return it. Otherwise:
# * If we're due to wait for ever, continue
# * If we're out of time return None
# * Otherwise we've still got time left: continue
#
if discovered:
return discovered.address
if timed_out(started_at, wait_for_s):
return None
else:
return Continue
def do_discover_all(self, started_at):
_logger.debug("Discover all")
return [(service.name, service.address) for service in self._services_found.values()]
def do_reset(self, started_at):
_logger.debug("Reset")
self.do_pause(started_at)
self._services_found.clear()
self._services_to_advertise.clear()
self.do_resume(started_at)
def do_stop(self, started_at):
_logger.debug("Stop")
self.stop()
def listen_for_one_advert(self):
events = dict(self.poller.poll(1000 * self.finder_timeout_s))
if self.socket_fd not in events:
return
message, source = self.socket.recvfrom(self.beacon_message_size)
_logger.debug("Broadcast message received: %r", message)
service_name, service_address, ttl_s = _unpack(message)
service = _Service(service_name, service_address, ttl_s)
self._services_found[service_name] = service
def broadcast_one_advert(self):
if self._services_to_advertise:
next_service = self._services_to_advertise[0]
if next_service.advertise_at < time.time():
_logger.debug("%s due to advertise at %s", next_service.name, time.ctime(next_service.advertise_at))
message = _pack([next_service.name, next_service.address, next_service.ttl_s])
for broadcast_address in self.broadcast_addresses:
_logger.debug("Advertising on %s", broadcast_address)
self.socket.sendto(message, 0, (broadcast_address, self.beacon_port))
next_service.advertise_at = time.time() + self.time_between_broadcasts_s
self._services_to_advertise.rotate(-1)
def remove_expired_adverts(self):
for name, service in list(self._services_found.items()):
#
# A service with an empty expiry time never expired
#
if service.expires_at is None:
continue
if service.expires_at <= time.time():
_logger.warn("Removing advert for %s which expired at %s",
name, time.ctime(service.expires_at))
del self._services_found[name]
def poll_command_request(self):
"""If the command RPC socket has an incoming request,
separate it into its action and its params and put it
on the command request queue.
"""
try:
message = self.rpc.recv(zmq.NOBLOCK)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return
else:
raise
_logger.debug("Received command %s", message)
segments = _unpack(message)
action, params = segments[0], segments[1:]
_logger.debug("Adding %s, %s to the request queue", action, params)
self._command = _Command(action, params)
def process_command(self):
    """Run the pending command (if any) against its do_* handler.

    A handler result of Continue means "not finished yet" (eg a
    discovery still waiting): the command stays pending for the next
    loop.  Any other result -- including None -- becomes the command's
    response, which poll_command_reponse will send back and clear.
    """
    if not self._command:
        return
    command = self._command
    _logger.debug("process_command: %s", command.action)
    _logger.debug("Picked %s, %s, %s", command.action, command.params, command.started_at)
    function = getattr(self, "do_" + command.action.lower(), None)
    if not function:
        # BUG FIX: the action name was never interpolated into the
        # message, so the error literally read "%s is not a valid action".
        raise NotImplementedError("%s is not a valid action" % command.action)
    try:
        result = function(command.started_at, *command.params)
    except:
        _logger.exception("Problem calling %s with %s", command.action, command.params)
        result = None
    _logger.debug("result = %s", result)
    #
    # result will be Continue if the action cannot be completed
    # (eg a discovery) but its time is not yet expired. Leave
    # the command on the stack for now.
    #
    if result is Continue:
        return
    #
    # Otherwise record the result as the response and leave it for
    # poll_command_reponse to reply with.
    #
    command.response = result
def poll_command_reponse(self):
"""If the latest request has a response, issue it as a
reply to the RPC socket.
"""
if self._command.response is not Empty:
_logger.debug("Sending response %s", self._command.response)
self.rpc.send(_pack(self._command.response))
self._command = None
def run(self):
_logger.info("Starting discovery")
while not self._stop_event.wait(0):
try:
#
# If we're not already processing one, check for an command
# to advertise/discover from a local process.
#
if not self._command:
self.poll_command_request()
#
# If we're paused no adverts will be broadcast. Adverts
# will be received and stale ones expired
#
if not self._is_paused:
#
# Broadcast the first advert whose advertising schedule
# has arrived
#
self.broadcast_one_advert()
#
# See if an advert broadcast has arrived
#
self.listen_for_one_advert()
#
# See if any adverts have expired
#
self.remove_expired_adverts()
#
# If we're processing a command, see if it's complete
#
if self._command:
self.process_command()
self.poll_command_reponse()
except:
_logger.exception("Problem in beacon thread")
break
_logger.info("Ending discovery")
self.rpc.close()
self.socket.close()
_beacon = None
_remote_beacon = object()
def _start_beacon(port=None):
"""Start a beacon thread within this process if no beacon is currently
running on this machine.
In general this is called automatically when an attempt is made to
advertise or discover. It might be convenient, though, to call this
function directly if you want to have a process whose only job is
to host this beacon so that it doesn't shut down when other processes
shut down.
"""
global _beacon
if _beacon is None:
_logger.debug("About to start beacon with port %s", port)
try:
_beacon = _Beacon(port)
except (OSError, socket.error) as exc:
if exc.errno == errno.EADDRINUSE:
_logger.warn("Beacon already active on this machine")
#
# _remote_beacon is simply a not-None sentinel value
# to distinguish between the case where we have not
# yet started a beacon and where we have found one
# in another process.
#
_beacon = _remote_beacon
else:
raise
else:
_beacon.start()
def _stop_beacon():
#
# Mostly for testing: shutdown the beacon if it's running
# locally and clear it globally so the next attempt will
# start fresh.
#
global _beacon
if _beacon and _beacon is not _remote_beacon:
_beacon.stop()
_beacon.join()
_beacon = None
def _rpc(action, *args, **kwargs):
_logger.debug("About to send rpc request %s with args %s, kwargs %s", action, args, kwargs)
wait_for_s = kwargs.pop("wait_for_s", 5)
with sockets.context.socket(zmq.REQ) as socket:
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
socket.connect("tcp://localhost:%s" % _Beacon.rpc_port)
socket.send(_pack([action] + list(args)))
reply = sockets._sockets._receive_with_timeout(socket, wait_for_s)
return _unpack(reply)
def _pause():
return _rpc("pause")
def _resume():
return _rpc("resume")
_services_advertised = {}
def advertise(name, address=None, fail_if_exists=False, ttl_s=config.ADVERT_TTL_S):
"""Advertise a name at an address
Start to advertise service `name` at address `address`. If
the address is not supplied, one is constructed and this is
returned by the function. ie this is a typical use::
address = nw0.advertise("myservice")
:param name: any text
:param address: either "ip:port" or None
:param fail_if_exists: fail if this name is already registered?
:param ttl_s: the advert will persist for this many seconds other beacons
:returns: the address given or constructed
"""
_start_beacon()
address = _rpc("advertise", name, address, fail_if_exists, ttl_s)
_services_advertised[name] = address
return address
def _unadvertise_all():
"""Remove all adverts
"""
for name in _services_advertised:
try:
_unadvertise(name)
except core.SocketTimedOutError:
_logger.warn("Timed out trying to unadvertise")
break
atexit.register(_unadvertise_all)
def _unadvertise(name):
"""Remove the advert for a name
This is intended for internal use only at the moment. When a process
exits it can remove adverts for its services from the beacon running
on that machine. (Of course, if the beacon thread is part of of the
same service, all its adverts will cease).
"""
_start_beacon()
return _rpc("unadvertise", name)
def discover(name, wait_for_s=60):
"""Discover a service by name
Look for an advert to a named service::
address = nw0.discover("myservice")
:param name: any text
:param wait_for_s: how many seconds to wait before giving up
:returns: the address found or None
"""
_start_beacon()
#
# It's possible to enter a deadlock situation where the first
# process fires off a discovery request and waits for the
# second process to advertise. But the second process has to
# connect to the rpc port of the first process' beacon and
# its advertisement is queued behind the pending discovery.
#
# To give both a chance of succeeding we operate in bursts,
# allowing them to interleave.
#
t0 = time.time()
while True:
discovery = _rpc("discover", name, 0.5)
if discovery:
return discovery
if timed_out(t0, wait_for_s):
return None
def discover_all():
"""Produce a list of all known services and their addresses
Ask for all known services as a list of 2-tuples: (name, address)
This could, eg, be used to form a dictionary of services::
services = dict(nw0.discover_all())
:returns: a list of 2-tuples [(name, address), ...]
"""
_start_beacon()
return _rpc("discover_all")
def discover_group(group, separator="/", exclude=None):
"""Produce a list of all services and their addresses in a group
A group is an optional form of namespace within the discovery mechanism.
If an advertised name has the form <group><sep><name> it is deemed to
belong to <group>. Note that the service's name is still the full
string <group><sep><name>. The group concept is simply for discovery and
to assist differentiation, eg, in a classroom group.
:param group: the name of a group prefix
:param separator: the separator character [/]
:param exclude: an iterable of names to exclude (or None)
:returns: a list of 2-tuples [(name, address), ...]
"""
_start_beacon()
if exclude is None:
names_to_exclude = set()
else:
names_to_exclude = set(exclude)
all_discovered = _rpc("discover_all")
return [(name, address)
for (name, address) in all_discovered
if name.startswith("%s%s" % (group, separator))
and name not in names_to_exclude
]
def reset_beacon():
"""Clear the adverts which the beacon is carrying
(This is mostly useful when testing, to get a fresh start)
"""
_start_beacon()
return _rpc("reset")
if __name__ == '__main__':
params = [arg.lower() for arg in sys.argv]
if "--debug" in params:
logging_level = logging.DEBUG
else:
logging_level = logging.INFO
handler = logging.StreamHandler()
handler.setLevel(logging_level)
handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
_logger.addHandler(handler)
_start_beacon()
_logger.info("Beacon started at %s", time.asctime())
while True:
time.sleep(1)
|
tjguk/networkzero | networkzero/discovery.py | _Beacon.poll_command_request | python | def poll_command_request(self):
try:
message = self.rpc.recv(zmq.NOBLOCK)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return
else:
raise
_logger.debug("Received command %s", message)
segments = _unpack(message)
action, params = segments[0], segments[1:]
_logger.debug("Adding %s, %s to the request queue", action, params)
self._command = _Command(action, params) | If the command RPC socket has an incoming request,
separate it into its action and its params and put it
on the command request queue. | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L320-L337 | [
"def _unpack(message):\n return json.loads(message.decode(config.ENCODING))\n"
] | class _Beacon(threading.Thread):
"""Threaded beacon to: listen for adverts & broadcast adverts
"""
rpc_port = 9998
beacon_port = 9999
finder_timeout_s = 0.05
beacon_message_size = 256
time_between_broadcasts_s = config.BEACON_ADVERT_FREQUENCY_S
def __init__(self, beacon_port=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self._stop_event = threading.Event()
self._is_paused = False
self.beacon_port = beacon_port or self.__class__.beacon_port
_logger.debug("Using beacon port %s", self.beacon_port)
#
# Services we're advertising
#
self._services_to_advertise = collections.deque()
#
# Broadcast adverts which we've received (some of which will be our own)
#
self._services_found = {}
#
# _Command requests are collected on one queue
# _Command responses are added to another
#
self._command = None
#
# Set the socket up to broadcast datagrams over UDP
#
self.broadcast_addresses = set(core._find_ip4_broadcast_addresses())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.bind(("", self.beacon_port))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
self.socket_fd = self.socket.fileno()
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.rpc = sockets.context.socket(zmq.REP)
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
self.rpc.linger = 1000
_bind_with_timeout(self.rpc.bind, ("tcp://127.0.0.1:%s" % self.rpc_port,))
def stop(self):
_logger.debug("About to stop")
self._stop_event.set()
#
# Commands available via RPC are methods whose name starts with "do_"
#
def do_advertise(self, started_at, name, address, fail_if_exists, ttl_s):
_logger.debug("Advertise %s on %s %s TTL=%s", name, address, fail_if_exists, ttl_s)
canonical_address = core.address(address)
for service in self._services_to_advertise:
if service.name == name:
if fail_if_exists:
_logger.error("_Service %s already exists on %s", name, service.address)
return None
else:
_logger.warn("Superseding service %s which already exists on %s", name, service.address)
service = _Service(name, canonical_address, ttl_s)
self._services_to_advertise.append(service)
#
# As a shortcut, automatically "discover" any services we ourselves are advertising
#
self._services_found[name] = service
return canonical_address
def do_unadvertise(self, started_at, name):
_logger.debug("Unadvertise %s", name)
for service in self._services_to_advertise:
if service.name == name:
self._services_to_advertise.remove(service)
break
else:
_logger.warn("No advert found for %s", name)
_logger.debug("Services now: %s", self._services_to_advertise)
def do_pause(self, started_at):
_logger.debug("Pause")
self._is_paused = True
def do_resume(self, started_at):
_logger.debug("Resume")
self._is_paused = False
def do_discover(self, started_at, name, wait_for_s):
_logger.debug("Discover %s waiting for %s seconds", name, wait_for_s)
discovered = self._services_found.get(name)
#
# If we've got a match, return it. Otherwise:
# * If we're due to wait for ever, continue
# * If we're out of time return None
# * Otherwise we've still got time left: continue
#
if discovered:
return discovered.address
if timed_out(started_at, wait_for_s):
return None
else:
return Continue
def do_discover_all(self, started_at):
_logger.debug("Discover all")
return [(service.name, service.address) for service in self._services_found.values()]
def do_reset(self, started_at):
_logger.debug("Reset")
self.do_pause(started_at)
self._services_found.clear()
self._services_to_advertise.clear()
self.do_resume(started_at)
def do_stop(self, started_at):
_logger.debug("Stop")
self.stop()
def listen_for_one_advert(self):
events = dict(self.poller.poll(1000 * self.finder_timeout_s))
if self.socket_fd not in events:
return
message, source = self.socket.recvfrom(self.beacon_message_size)
_logger.debug("Broadcast message received: %r", message)
service_name, service_address, ttl_s = _unpack(message)
service = _Service(service_name, service_address, ttl_s)
self._services_found[service_name] = service
def broadcast_one_advert(self):
if self._services_to_advertise:
next_service = self._services_to_advertise[0]
if next_service.advertise_at < time.time():
_logger.debug("%s due to advertise at %s", next_service.name, time.ctime(next_service.advertise_at))
message = _pack([next_service.name, next_service.address, next_service.ttl_s])
for broadcast_address in self.broadcast_addresses:
_logger.debug("Advertising on %s", broadcast_address)
self.socket.sendto(message, 0, (broadcast_address, self.beacon_port))
next_service.advertise_at = time.time() + self.time_between_broadcasts_s
self._services_to_advertise.rotate(-1)
def remove_expired_adverts(self):
for name, service in list(self._services_found.items()):
#
# A service with an empty expiry time never expired
#
if service.expires_at is None:
continue
if service.expires_at <= time.time():
_logger.warn("Removing advert for %s which expired at %s",
name, time.ctime(service.expires_at))
del self._services_found[name]
def process_command(self):
if not self._command:
return
else:
_logger.debug("process_command: %s", self._command.action)
command = self._command
_logger.debug("Picked %s, %s, %s", self._command.action, self._command.params, self._command.started_at)
function = getattr(self, "do_" + command.action.lower(), None)
if not function:
raise NotImplementedError("%s is not a valid action")
else:
try:
result = function(command.started_at, *command.params)
except:
_logger.exception("Problem calling %s with %s", command.action, command.params)
result = None
_logger.debug("result = %s", result)
#
# result will be Continue if the action cannot be completed
# (eg a discovery) but its time is not yet expired. Leave
# the command on the stack for now.
#
if result is Continue:
return
#
# If we get a result, add the result to the response
# queue and pop the request off the stack.
#
self._command.response = result
def poll_command_reponse(self):
"""If the latest request has a response, issue it as a
reply to the RPC socket.
"""
if self._command.response is not Empty:
_logger.debug("Sending response %s", self._command.response)
self.rpc.send(_pack(self._command.response))
self._command = None
def run(self):
_logger.info("Starting discovery")
while not self._stop_event.wait(0):
try:
#
# If we're not already processing one, check for an command
# to advertise/discover from a local process.
#
if not self._command:
self.poll_command_request()
#
# If we're paused no adverts will be broadcast. Adverts
# will be received and stale ones expired
#
if not self._is_paused:
#
# Broadcast the first advert whose advertising schedule
# has arrived
#
self.broadcast_one_advert()
#
# See if an advert broadcast has arrived
#
self.listen_for_one_advert()
#
# See if any adverts have expired
#
self.remove_expired_adverts()
#
# If we're processing a command, see if it's complete
#
if self._command:
self.process_command()
self.poll_command_reponse()
except:
_logger.exception("Problem in beacon thread")
break
_logger.info("Ending discovery")
self.rpc.close()
self.socket.close()
|
tjguk/networkzero | networkzero/discovery.py | _Beacon.poll_command_reponse | python | def poll_command_reponse(self):
if self._command.response is not Empty:
_logger.debug("Sending response %s", self._command.response)
self.rpc.send(_pack(self._command.response))
self._command = None | If the latest request has a response, issue it as a
reply to the RPC socket. | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L373-L380 | [
"def _pack(message):\n return json.dumps(message).encode(config.ENCODING)\n"
] | class _Beacon(threading.Thread):
"""Threaded beacon to: listen for adverts & broadcast adverts
"""
rpc_port = 9998
beacon_port = 9999
finder_timeout_s = 0.05
beacon_message_size = 256
time_between_broadcasts_s = config.BEACON_ADVERT_FREQUENCY_S
def __init__(self, beacon_port=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self._stop_event = threading.Event()
self._is_paused = False
self.beacon_port = beacon_port or self.__class__.beacon_port
_logger.debug("Using beacon port %s", self.beacon_port)
#
# Services we're advertising
#
self._services_to_advertise = collections.deque()
#
# Broadcast adverts which we've received (some of which will be our own)
#
self._services_found = {}
#
# _Command requests are collected on one queue
# _Command responses are added to another
#
self._command = None
#
# Set the socket up to broadcast datagrams over UDP
#
self.broadcast_addresses = set(core._find_ip4_broadcast_addresses())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.socket.bind(("", self.beacon_port))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
self.socket_fd = self.socket.fileno()
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.rpc = sockets.context.socket(zmq.REP)
#
# To avoid problems when restarting a beacon not long after it's been
# closed, force the socket to shut down regardless about 1 second after
# it's been closed.
#
self.rpc.linger = 1000
_bind_with_timeout(self.rpc.bind, ("tcp://127.0.0.1:%s" % self.rpc_port,))
def stop(self):
_logger.debug("About to stop")
self._stop_event.set()
#
# Commands available via RPC are methods whose name starts with "do_"
#
def do_advertise(self, started_at, name, address, fail_if_exists, ttl_s):
_logger.debug("Advertise %s on %s %s TTL=%s", name, address, fail_if_exists, ttl_s)
canonical_address = core.address(address)
for service in self._services_to_advertise:
if service.name == name:
if fail_if_exists:
_logger.error("_Service %s already exists on %s", name, service.address)
return None
else:
_logger.warn("Superseding service %s which already exists on %s", name, service.address)
service = _Service(name, canonical_address, ttl_s)
self._services_to_advertise.append(service)
#
# As a shortcut, automatically "discover" any services we ourselves are advertising
#
self._services_found[name] = service
return canonical_address
def do_unadvertise(self, started_at, name):
_logger.debug("Unadvertise %s", name)
for service in self._services_to_advertise:
if service.name == name:
self._services_to_advertise.remove(service)
break
else:
_logger.warn("No advert found for %s", name)
_logger.debug("Services now: %s", self._services_to_advertise)
def do_pause(self, started_at):
_logger.debug("Pause")
self._is_paused = True
def do_resume(self, started_at):
_logger.debug("Resume")
self._is_paused = False
def do_discover(self, started_at, name, wait_for_s):
_logger.debug("Discover %s waiting for %s seconds", name, wait_for_s)
discovered = self._services_found.get(name)
#
# If we've got a match, return it. Otherwise:
# * If we're due to wait for ever, continue
# * If we're out of time return None
# * Otherwise we've still got time left: continue
#
if discovered:
return discovered.address
if timed_out(started_at, wait_for_s):
return None
else:
return Continue
def do_discover_all(self, started_at):
_logger.debug("Discover all")
return [(service.name, service.address) for service in self._services_found.values()]
def do_reset(self, started_at):
_logger.debug("Reset")
self.do_pause(started_at)
self._services_found.clear()
self._services_to_advertise.clear()
self.do_resume(started_at)
def do_stop(self, started_at):
_logger.debug("Stop")
self.stop()
def listen_for_one_advert(self):
events = dict(self.poller.poll(1000 * self.finder_timeout_s))
if self.socket_fd not in events:
return
message, source = self.socket.recvfrom(self.beacon_message_size)
_logger.debug("Broadcast message received: %r", message)
service_name, service_address, ttl_s = _unpack(message)
service = _Service(service_name, service_address, ttl_s)
self._services_found[service_name] = service
def broadcast_one_advert(self):
if self._services_to_advertise:
next_service = self._services_to_advertise[0]
if next_service.advertise_at < time.time():
_logger.debug("%s due to advertise at %s", next_service.name, time.ctime(next_service.advertise_at))
message = _pack([next_service.name, next_service.address, next_service.ttl_s])
for broadcast_address in self.broadcast_addresses:
_logger.debug("Advertising on %s", broadcast_address)
self.socket.sendto(message, 0, (broadcast_address, self.beacon_port))
next_service.advertise_at = time.time() + self.time_between_broadcasts_s
self._services_to_advertise.rotate(-1)
def remove_expired_adverts(self):
for name, service in list(self._services_found.items()):
#
# A service with an empty expiry time never expired
#
if service.expires_at is None:
continue
if service.expires_at <= time.time():
_logger.warn("Removing advert for %s which expired at %s",
name, time.ctime(service.expires_at))
del self._services_found[name]
def poll_command_request(self):
"""If the command RPC socket has an incoming request,
separate it into its action and its params and put it
on the command request queue.
"""
try:
message = self.rpc.recv(zmq.NOBLOCK)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return
else:
raise
_logger.debug("Received command %s", message)
segments = _unpack(message)
action, params = segments[0], segments[1:]
_logger.debug("Adding %s, %s to the request queue", action, params)
self._command = _Command(action, params)
def process_command(self):
if not self._command:
return
else:
_logger.debug("process_command: %s", self._command.action)
command = self._command
_logger.debug("Picked %s, %s, %s", self._command.action, self._command.params, self._command.started_at)
function = getattr(self, "do_" + command.action.lower(), None)
if not function:
raise NotImplementedError("%s is not a valid action")
else:
try:
result = function(command.started_at, *command.params)
except:
_logger.exception("Problem calling %s with %s", command.action, command.params)
result = None
_logger.debug("result = %s", result)
#
# result will be Continue if the action cannot be completed
# (eg a discovery) but its time is not yet expired. Leave
# the command on the stack for now.
#
if result is Continue:
return
#
# If we get a result, add the result to the response
# queue and pop the request off the stack.
#
self._command.response = result
def run(self):
_logger.info("Starting discovery")
while not self._stop_event.wait(0):
try:
#
# If we're not already processing one, check for an command
# to advertise/discover from a local process.
#
if not self._command:
self.poll_command_request()
#
# If we're paused no adverts will be broadcast. Adverts
# will be received and stale ones expired
#
if not self._is_paused:
#
# Broadcast the first advert whose advertising schedule
# has arrived
#
self.broadcast_one_advert()
#
# See if an advert broadcast has arrived
#
self.listen_for_one_advert()
#
# See if any adverts have expired
#
self.remove_expired_adverts()
#
# If we're processing a command, see if it's complete
#
if self._command:
self.process_command()
self.poll_command_reponse()
except:
_logger.exception("Problem in beacon thread")
break
_logger.info("Ending discovery")
self.rpc.close()
self.socket.close()
|
tjguk/networkzero | networkzero/messenger.py | send_message_to | python | def send_message_to(address, message=EMPTY, wait_for_reply_s=config.FOREVER):
_logger.info("Sending message %s to %s", message, address)
if isinstance(address, list):
raise core.InvalidAddressError("Multiple addresses are not allowed")
return sockets._sockets.send_message_to(address, message, wait_for_reply_s) | Send a message and return the reply
:param address: a nw0 address (eg from `nw0.discover`)
:param message: any simple Python object, including text & tuples
:param wait_for_reply_s: how many seconds to wait for a reply [default: forever]
:returns: the reply returned from the address or None if out of time | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/messenger.py#L11-L23 | [
"def send_message_to(self, address, message, wait_for_reply_s):\n socket = self.get_socket(address, \"speaker\")\n serialised_message = _serialise(message)\n socket.send(serialised_message)\n return _unserialise(self._receive_with_timeout(socket, wait_for_reply_s))\n"
] | # -*- coding: utf-8 -*-
import uuid
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
EMPTY = None
def send_message_to(address, message=EMPTY, wait_for_reply_s=config.FOREVER):
"""Send a message and return the reply
:param address: a nw0 address (eg from `nw0.discover`)
:param message: any simple Python object, including text & tuples
:param wait_for_reply_s: how many seconds to wait for a reply [default: forever]
:returns: the reply returned from the address or None if out of time
"""
_logger.info("Sending message %s to %s", message, address)
if isinstance(address, list):
raise core.InvalidAddressError("Multiple addresses are not allowed")
return sockets._sockets.send_message_to(address, message, wait_for_reply_s)
def wait_for_message_from(address, wait_for_s=config.FOREVER, autoreply=False):
"""Wait for a message
:param address: a nw0 address (eg from `nw0.advertise`)
:param wait_for_s: how many seconds to wait for a message before giving up [default: forever]
:param autoreply: whether to send an empty reply [default: No]
:returns: the message received from another address or None if out of time
"""
_logger.info("Waiting for message on %s for %s secs", address, wait_for_s)
message = sockets._sockets.wait_for_message_from(address, wait_for_s)
if message is not None and autoreply:
sockets._sockets.send_reply_to(address, EMPTY)
return message
def send_reply_to(address, reply=EMPTY):
"""Reply to a message previously received
:param address: a nw0 address (eg from `nw0.advertise`)
:param reply: any simple Python object, including text & tuples
"""
_logger.debug("Sending reply %s to %s", reply, address)
return sockets._sockets.send_reply_to(address, reply)
def send_news_to(address, topic, data=None):
"""Publish news to all subscribers
:param address: a nw0 address, eg from `nw0.advertise`
:param topic: any text object
:param data: any simple Python object including test & tuples [default: empty]
"""
_logger.info("Publish topic %s with data %s to %s", topic, data, address)
return sockets._sockets.send_news_to(address, topic, data)
def wait_for_news_from(address, prefix=config.EVERYTHING, wait_for_s=config.FOREVER, is_raw=False):
"""Wait for news whose topic starts with `prefix`.
:param address: a nw0 address, eg from `nw0.discover`
:param prefix: any text object [default: all messages]
:param wait_for_s: how many seconds to wait before giving up [default: forever]
:returns: a 2-tuple of (topic, data) or (None, None) if out of time
"""
_logger.info("Listen on %s for news matching %s waiting for %s secs", address, prefix, wait_for_s)
return sockets._sockets.wait_for_news_from(address, prefix, wait_for_s, is_raw)
|
tjguk/networkzero | networkzero/messenger.py | wait_for_message_from | python | def wait_for_message_from(address, wait_for_s=config.FOREVER, autoreply=False):
_logger.info("Waiting for message on %s for %s secs", address, wait_for_s)
message = sockets._sockets.wait_for_message_from(address, wait_for_s)
if message is not None and autoreply:
sockets._sockets.send_reply_to(address, EMPTY)
return message | Wait for a message
:param address: a nw0 address (eg from `nw0.advertise`)
:param wait_for_s: how many seconds to wait for a message before giving up [default: forever]
:param autoreply: whether to send an empty reply [default: No]
:returns: the message received from another address or None if out of time | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/messenger.py#L25-L38 | [
"def wait_for_message_from(self, address, wait_for_s):\n socket = self.get_socket(address, \"listener\")\n try:\n message = self._receive_with_timeout(socket, wait_for_s)\n except (core.SocketTimedOutError):\n return None\n else:\n return _unserialise(message)\n",
"def send_reply_... | # -*- coding: utf-8 -*-
import uuid
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
EMPTY = None
def send_message_to(address, message=EMPTY, wait_for_reply_s=config.FOREVER):
"""Send a message and return the reply
:param address: a nw0 address (eg from `nw0.discover`)
:param message: any simple Python object, including text & tuples
:param wait_for_reply_s: how many seconds to wait for a reply [default: forever]
:returns: the reply returned from the address or None if out of time
"""
_logger.info("Sending message %s to %s", message, address)
if isinstance(address, list):
raise core.InvalidAddressError("Multiple addresses are not allowed")
return sockets._sockets.send_message_to(address, message, wait_for_reply_s)
def wait_for_message_from(address, wait_for_s=config.FOREVER, autoreply=False):
"""Wait for a message
:param address: a nw0 address (eg from `nw0.advertise`)
:param wait_for_s: how many seconds to wait for a message before giving up [default: forever]
:param autoreply: whether to send an empty reply [default: No]
:returns: the message received from another address or None if out of time
"""
_logger.info("Waiting for message on %s for %s secs", address, wait_for_s)
message = sockets._sockets.wait_for_message_from(address, wait_for_s)
if message is not None and autoreply:
sockets._sockets.send_reply_to(address, EMPTY)
return message
def send_reply_to(address, reply=EMPTY):
"""Reply to a message previously received
:param address: a nw0 address (eg from `nw0.advertise`)
:param reply: any simple Python object, including text & tuples
"""
_logger.debug("Sending reply %s to %s", reply, address)
return sockets._sockets.send_reply_to(address, reply)
def send_news_to(address, topic, data=None):
"""Publish news to all subscribers
:param address: a nw0 address, eg from `nw0.advertise`
:param topic: any text object
:param data: any simple Python object including test & tuples [default: empty]
"""
_logger.info("Publish topic %s with data %s to %s", topic, data, address)
return sockets._sockets.send_news_to(address, topic, data)
def wait_for_news_from(address, prefix=config.EVERYTHING, wait_for_s=config.FOREVER, is_raw=False):
"""Wait for news whose topic starts with `prefix`.
:param address: a nw0 address, eg from `nw0.discover`
:param prefix: any text object [default: all messages]
:param wait_for_s: how many seconds to wait before giving up [default: forever]
:returns: a 2-tuple of (topic, data) or (None, None) if out of time
"""
_logger.info("Listen on %s for news matching %s waiting for %s secs", address, prefix, wait_for_s)
return sockets._sockets.wait_for_news_from(address, prefix, wait_for_s, is_raw)
|
tjguk/networkzero | networkzero/messenger.py | send_reply_to | python | def send_reply_to(address, reply=EMPTY):
_logger.debug("Sending reply %s to %s", reply, address)
return sockets._sockets.send_reply_to(address, reply) | Reply to a message previously received
:param address: a nw0 address (eg from `nw0.advertise`)
:param reply: any simple Python object, including text & tuples | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/messenger.py#L40-L47 | [
"def send_reply_to(self, address, reply):\n socket = self.get_socket(address, \"listener\")\n reply = _serialise(reply)\n return socket.send(reply)\n"
] | # -*- coding: utf-8 -*-
import uuid
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
EMPTY = None
def send_message_to(address, message=EMPTY, wait_for_reply_s=config.FOREVER):
"""Send a message and return the reply
:param address: a nw0 address (eg from `nw0.discover`)
:param message: any simple Python object, including text & tuples
:param wait_for_reply_s: how many seconds to wait for a reply [default: forever]
:returns: the reply returned from the address or None if out of time
"""
_logger.info("Sending message %s to %s", message, address)
if isinstance(address, list):
raise core.InvalidAddressError("Multiple addresses are not allowed")
return sockets._sockets.send_message_to(address, message, wait_for_reply_s)
def wait_for_message_from(address, wait_for_s=config.FOREVER, autoreply=False):
"""Wait for a message
:param address: a nw0 address (eg from `nw0.advertise`)
:param wait_for_s: how many seconds to wait for a message before giving up [default: forever]
:param autoreply: whether to send an empty reply [default: No]
:returns: the message received from another address or None if out of time
"""
_logger.info("Waiting for message on %s for %s secs", address, wait_for_s)
message = sockets._sockets.wait_for_message_from(address, wait_for_s)
if message is not None and autoreply:
sockets._sockets.send_reply_to(address, EMPTY)
return message
def send_reply_to(address, reply=EMPTY):
"""Reply to a message previously received
:param address: a nw0 address (eg from `nw0.advertise`)
:param reply: any simple Python object, including text & tuples
"""
_logger.debug("Sending reply %s to %s", reply, address)
return sockets._sockets.send_reply_to(address, reply)
def send_news_to(address, topic, data=None):
"""Publish news to all subscribers
:param address: a nw0 address, eg from `nw0.advertise`
:param topic: any text object
:param data: any simple Python object including test & tuples [default: empty]
"""
_logger.info("Publish topic %s with data %s to %s", topic, data, address)
return sockets._sockets.send_news_to(address, topic, data)
def wait_for_news_from(address, prefix=config.EVERYTHING, wait_for_s=config.FOREVER, is_raw=False):
"""Wait for news whose topic starts with `prefix`.
:param address: a nw0 address, eg from `nw0.discover`
:param prefix: any text object [default: all messages]
:param wait_for_s: how many seconds to wait before giving up [default: forever]
:returns: a 2-tuple of (topic, data) or (None, None) if out of time
"""
_logger.info("Listen on %s for news matching %s waiting for %s secs", address, prefix, wait_for_s)
return sockets._sockets.wait_for_news_from(address, prefix, wait_for_s, is_raw)
|
tjguk/networkzero | networkzero/messenger.py | send_news_to | python | def send_news_to(address, topic, data=None):
_logger.info("Publish topic %s with data %s to %s", topic, data, address)
return sockets._sockets.send_news_to(address, topic, data) | Publish news to all subscribers
:param address: a nw0 address, eg from `nw0.advertise`
:param topic: any text object
:param data: any simple Python object including test & tuples [default: empty] | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/messenger.py#L49-L57 | [
"def send_news_to(self, address, topic, data):\n socket = self.get_socket(address, \"publisher\")\n return socket.send_multipart(_serialise_for_pubsub(topic, data))\n"
] | # -*- coding: utf-8 -*-
import uuid
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
EMPTY = None
def send_message_to(address, message=EMPTY, wait_for_reply_s=config.FOREVER):
"""Send a message and return the reply
:param address: a nw0 address (eg from `nw0.discover`)
:param message: any simple Python object, including text & tuples
:param wait_for_reply_s: how many seconds to wait for a reply [default: forever]
:returns: the reply returned from the address or None if out of time
"""
_logger.info("Sending message %s to %s", message, address)
if isinstance(address, list):
raise core.InvalidAddressError("Multiple addresses are not allowed")
return sockets._sockets.send_message_to(address, message, wait_for_reply_s)
def wait_for_message_from(address, wait_for_s=config.FOREVER, autoreply=False):
"""Wait for a message
:param address: a nw0 address (eg from `nw0.advertise`)
:param wait_for_s: how many seconds to wait for a message before giving up [default: forever]
:param autoreply: whether to send an empty reply [default: No]
:returns: the message received from another address or None if out of time
"""
_logger.info("Waiting for message on %s for %s secs", address, wait_for_s)
message = sockets._sockets.wait_for_message_from(address, wait_for_s)
if message is not None and autoreply:
sockets._sockets.send_reply_to(address, EMPTY)
return message
def send_reply_to(address, reply=EMPTY):
"""Reply to a message previously received
:param address: a nw0 address (eg from `nw0.advertise`)
:param reply: any simple Python object, including text & tuples
"""
_logger.debug("Sending reply %s to %s", reply, address)
return sockets._sockets.send_reply_to(address, reply)
def send_news_to(address, topic, data=None):
"""Publish news to all subscribers
:param address: a nw0 address, eg from `nw0.advertise`
:param topic: any text object
:param data: any simple Python object including test & tuples [default: empty]
"""
_logger.info("Publish topic %s with data %s to %s", topic, data, address)
return sockets._sockets.send_news_to(address, topic, data)
def wait_for_news_from(address, prefix=config.EVERYTHING, wait_for_s=config.FOREVER, is_raw=False):
"""Wait for news whose topic starts with `prefix`.
:param address: a nw0 address, eg from `nw0.discover`
:param prefix: any text object [default: all messages]
:param wait_for_s: how many seconds to wait before giving up [default: forever]
:returns: a 2-tuple of (topic, data) or (None, None) if out of time
"""
_logger.info("Listen on %s for news matching %s waiting for %s secs", address, prefix, wait_for_s)
return sockets._sockets.wait_for_news_from(address, prefix, wait_for_s, is_raw)
|
tjguk/networkzero | networkzero/messenger.py | wait_for_news_from | python | def wait_for_news_from(address, prefix=config.EVERYTHING, wait_for_s=config.FOREVER, is_raw=False):
_logger.info("Listen on %s for news matching %s waiting for %s secs", address, prefix, wait_for_s)
return sockets._sockets.wait_for_news_from(address, prefix, wait_for_s, is_raw) | Wait for news whose topic starts with `prefix`.
:param address: a nw0 address, eg from `nw0.discover`
:param prefix: any text object [default: all messages]
:param wait_for_s: how many seconds to wait before giving up [default: forever]
:returns: a 2-tuple of (topic, data) or (None, None) if out of time | train | https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/messenger.py#L59-L69 | [
"def wait_for_news_from(self, address, topic, wait_for_s, is_raw=False):\n if isinstance(address, list):\n addresses = address\n else:\n addresses = [address]\n socket = self.get_socket(addresses, \"subscriber\")\n if isinstance(topic, str):\n topics = [topic]\n else:\n to... | # -*- coding: utf-8 -*-
import uuid
from . import config
from . import core
from . import sockets
_logger = core.get_logger(__name__)
EMPTY = None
def send_message_to(address, message=EMPTY, wait_for_reply_s=config.FOREVER):
"""Send a message and return the reply
:param address: a nw0 address (eg from `nw0.discover`)
:param message: any simple Python object, including text & tuples
:param wait_for_reply_s: how many seconds to wait for a reply [default: forever]
:returns: the reply returned from the address or None if out of time
"""
_logger.info("Sending message %s to %s", message, address)
if isinstance(address, list):
raise core.InvalidAddressError("Multiple addresses are not allowed")
return sockets._sockets.send_message_to(address, message, wait_for_reply_s)
def wait_for_message_from(address, wait_for_s=config.FOREVER, autoreply=False):
"""Wait for a message
:param address: a nw0 address (eg from `nw0.advertise`)
:param wait_for_s: how many seconds to wait for a message before giving up [default: forever]
:param autoreply: whether to send an empty reply [default: No]
:returns: the message received from another address or None if out of time
"""
_logger.info("Waiting for message on %s for %s secs", address, wait_for_s)
message = sockets._sockets.wait_for_message_from(address, wait_for_s)
if message is not None and autoreply:
sockets._sockets.send_reply_to(address, EMPTY)
return message
def send_reply_to(address, reply=EMPTY):
"""Reply to a message previously received
:param address: a nw0 address (eg from `nw0.advertise`)
:param reply: any simple Python object, including text & tuples
"""
_logger.debug("Sending reply %s to %s", reply, address)
return sockets._sockets.send_reply_to(address, reply)
def send_news_to(address, topic, data=None):
"""Publish news to all subscribers
:param address: a nw0 address, eg from `nw0.advertise`
:param topic: any text object
:param data: any simple Python object including test & tuples [default: empty]
"""
_logger.info("Publish topic %s with data %s to %s", topic, data, address)
return sockets._sockets.send_news_to(address, topic, data)
def wait_for_news_from(address, prefix=config.EVERYTHING, wait_for_s=config.FOREVER, is_raw=False):
"""Wait for news whose topic starts with `prefix`.
:param address: a nw0 address, eg from `nw0.discover`
:param prefix: any text object [default: all messages]
:param wait_for_s: how many seconds to wait before giving up [default: forever]
:returns: a 2-tuple of (topic, data) or (None, None) if out of time
"""
_logger.info("Listen on %s for news matching %s waiting for %s secs", address, prefix, wait_for_s)
return sockets._sockets.wait_for_news_from(address, prefix, wait_for_s, is_raw)
|
agile4you/bottle-neck | bottle_neck/routing.py | Route.wrap_callable | python | def wrap_callable(cls, uri, methods, callable_obj):
if isinstance(callable_obj, HandlerMeta):
callable_obj.base_endpoint = uri
callable_obj.is_valid = True
return callable_obj
if isinstance(callable_obj, types.FunctionType):
return cls(uri=uri, methods=methods, callable_obj=callable_obj)
raise RouteError("Invalid handler type.") | Wraps function-based callable_obj into a `Route` instance, else
proxies a `bottle_neck.handlers.BaseHandler` subclass instance.
Args:
uri (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (instance): The callable object.
Returns:
A route instance.
Raises:
RouteError for invalid callable object type. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/routing.py#L38-L61 | null | class Route(object):
"""Base Route interface.
It wraps function-based web handlers in order to provide a
same interface for `bottle_neck.routing.Router` class functionality.
"""
__slots__ = ('uri', 'methods', 'callable_obj')
@classmethod
def __init__(self, uri, methods, callable_obj):
self.uri = uri,
self.methods = methods,
self.callable_obj = callable_obj
@property
def is_valid(self):
args = [self.uri, self.methods, self.callable_obj]
return all([arg for arg in args])
def register_app(self, app):
"""Register the route object to a `bottle.Bottle` app instance.
Args:
app (instance):
Returns:
Route instance (for chaining purposes)
"""
app.route(self.uri, methods=self.methods)(self.callable_obj)
return self
|
agile4you/bottle-neck | bottle_neck/routing.py | Route.register_app | python | def register_app(self, app):
app.route(self.uri, methods=self.methods)(self.callable_obj)
return self | Register the route object to a `bottle.Bottle` app instance.
Args:
app (instance):
Returns:
Route instance (for chaining purposes) | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/routing.py#L73-L84 | null | class Route(object):
"""Base Route interface.
It wraps function-based web handlers in order to provide a
same interface for `bottle_neck.routing.Router` class functionality.
"""
__slots__ = ('uri', 'methods', 'callable_obj')
@classmethod
def wrap_callable(cls, uri, methods, callable_obj):
"""Wraps function-based callable_obj into a `Route` instance, else
proxies a `bottle_neck.handlers.BaseHandler` subclass instance.
Args:
uri (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (instance): The callable object.
Returns:
A route instance.
Raises:
RouteError for invalid callable object type.
"""
if isinstance(callable_obj, HandlerMeta):
callable_obj.base_endpoint = uri
callable_obj.is_valid = True
return callable_obj
if isinstance(callable_obj, types.FunctionType):
return cls(uri=uri, methods=methods, callable_obj=callable_obj)
raise RouteError("Invalid handler type.")
def __init__(self, uri, methods, callable_obj):
self.uri = uri,
self.methods = methods,
self.callable_obj = callable_obj
@property
def is_valid(self):
args = [self.uri, self.methods, self.callable_obj]
return all([arg for arg in args])
|
agile4you/bottle-neck | bottle_neck/routing.py | Router.register_handler | python | def register_handler(self, callable_obj, entrypoint, methods=('GET',)):
router_obj = Route.wrap_callable(
uri=entrypoint,
methods=methods,
callable_obj=callable_obj
)
if router_obj.is_valid:
self._routes.add(router_obj)
return self
raise RouteError( # pragma: no cover
"Missing params: methods: {} - entrypoint: {}".format(
methods, entrypoint
)
) | Register a handler callable to a specific route.
Args:
entrypoint (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (callable): The callable object.
Returns:
The Router instance (for chaining purposes).
Raises:
RouteError, for missing routing params or invalid callable
object type. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/routing.py#L98-L128 | [
"def wrap_callable(cls, uri, methods, callable_obj):\n \"\"\"Wraps function-based callable_obj into a `Route` instance, else\n proxies a `bottle_neck.handlers.BaseHandler` subclass instance.\n\n Args:\n uri (str): The uri relative path.\n methods (tuple): A tuple of valid method strings.\n ... | class Router(object):
"""Base Router class for bottle.py WSGI applications.
"""
def __init__(self):
self._routes = set()
@property
def routes(self): # pragma: no cover
return self._routes
def register_handler(self, callable_obj, entrypoint, methods=('GET',)):
"""Register a handler callable to a specific route.
Args:
entrypoint (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (callable): The callable object.
Returns:
The Router instance (for chaining purposes).
Raises:
RouteError, for missing routing params or invalid callable
object type.
"""
router_obj = Route.wrap_callable(
uri=entrypoint,
methods=methods,
callable_obj=callable_obj
)
if router_obj.is_valid:
self._routes.add(router_obj)
return self
raise RouteError( # pragma: no cover
"Missing params: methods: {} - entrypoint: {}".format(
methods, entrypoint
)
)
def mount(self, app=None):
"""Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes).
"""
for endpoint in self._routes:
endpoint.register_app(app)
return self
def __repr__(self): # pragma: no cover
return 'Router object: total {} routes'.format(len(self))
def __len__(self):
return len(self._routes)
def __iter__(self): # pragma: no cover
for route in self._routes:
yield route
|
agile4you/bottle-neck | bottle_neck/routing.py | Router.mount | python | def mount(self, app=None):
for endpoint in self._routes:
endpoint.register_app(app)
return self | Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes). | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/routing.py#L130-L142 | null | class Router(object):
"""Base Router class for bottle.py WSGI applications.
"""
def __init__(self):
self._routes = set()
@property
def routes(self): # pragma: no cover
return self._routes
def register_handler(self, callable_obj, entrypoint, methods=('GET',)):
"""Register a handler callable to a specific route.
Args:
entrypoint (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (callable): The callable object.
Returns:
The Router instance (for chaining purposes).
Raises:
RouteError, for missing routing params or invalid callable
object type.
"""
router_obj = Route.wrap_callable(
uri=entrypoint,
methods=methods,
callable_obj=callable_obj
)
if router_obj.is_valid:
self._routes.add(router_obj)
return self
raise RouteError( # pragma: no cover
"Missing params: methods: {} - entrypoint: {}".format(
methods, entrypoint
)
)
def __repr__(self): # pragma: no cover
return 'Router object: total {} routes'.format(len(self))
def __len__(self):
return len(self._routes)
def __iter__(self): # pragma: no cover
for route in self._routes:
yield route
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.from_status | python | def from_status(cls, status_line, msg=None):
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg) | Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized' | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L111-L132 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.created | python | def created(cls, data=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json | Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L150-L163 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.not_modified | python | def not_modified(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json | Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L166-L179 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.bad_request | python | def bad_request(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json | Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L182-L195 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.unauthorized | python | def unauthorized(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json | Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L198-L211 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.forbidden | python | def forbidden(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json | Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L214-L227 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.not_found | python | def not_found(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json | Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L230-L243 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.method_not_allowed | python | def method_not_allowed(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json | Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L246-L259 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.not_implemented | python | def not_implemented(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json | Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L262-L275 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.service_unavailable | python | def service_unavailable(cls, errors=None):
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json | Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L278-L291 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
@property
def to_json(self):
"""Short cut for JSON response service data.
Returns:
Dict that implements JSON interface.
"""
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp
|
agile4you/bottle-neck | bottle_neck/response.py | WSResponse.to_json | python | def to_json(self):
web_resp = collections.OrderedDict()
web_resp['status_code'] = self.status_code
web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)
web_resp['data'] = self.data if self.data is not None else {}
web_resp['errors'] = self.errors or []
return web_resp | Short cut for JSON response service data.
Returns:
Dict that implements JSON interface. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L294-L308 | null | class WSResponse(object):
"""Base web service response class.
WSResponse class provides a unified API for HTTP responses.
The Response body skeleton looks like this::
{
"status_code": 200,
"status_txt": "OK",
"response_data": {
"id": 65234
"username": "pav"
"email": "pav@geotagaeroview.com"
},
"errors": []
}
Attributes:
status_code (int): Response HTTP status code.
data (object): Response key/value data.
errors (dict): Response key/value errors.
Examples:
>>> response = WSResponse(
... status_code=200,
... data='Hi'
... )
...
>>> response.to_json
OrderedDict([('status_code', 200), ('status_text', 'OK'), ('data', 'Hi'), ('errors', [])])
"""
__slots__ = ['status_code', 'data', 'errors']
expose_status = True
response = bottle.response
def __init__(self, status_code=200, data=None, errors=None):
if status_code not in dict(HTTP_CODES) or\
not isinstance(errors, six.string_types + (list, tuple, type(None),)):
raise WSResponseError('Invalid Response initialization.')
self.status_code = status_code
self.data = data or {}
if isinstance(errors, (six.string_types, )):
errors = [errors]
self.errors = errors
def __repr__(self): # pragma: no cover
return "WebService Response: status={}, data={}".format(
self.status_code, str(self.data)
)
def __eq__(self, other): # pragma: no cover
assert isinstance(other, WSResponse), 'Invalid Type for eq operator.'
return self.status_code == other.status_code and \
self.data == self.data
__str__ = __repr__
@classmethod
def from_status(cls, status_line, msg=None):
"""Returns a class method from bottle.HTTPError.status_line attribute.
Useful for patching `bottle.HTTPError` for web services.
Args:
status_line (str): bottle.HTTPError.status_line text.
msg: The message data for response.
Returns:
Class method based on status_line arg.
Examples:
>>> status_line = '401 Unauthorized'
>>> error_msg = 'Get out!'
>>> resp = WSResponse.from_status(status_line, error_msg)
>>> resp['errors']
['Get out!']
>>> resp['status_text']
'Unauthorized'
"""
method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))
return method(msg)
@classmethod
def ok(cls, data):
"""Shortcut API for HTTP 200 `OK` response.
Args:
data (object): Response key/value data.
Returns
WSResponse Instance.
"""
return cls(
status_code=200,
data=data
).to_json
@classmethod
def created(cls, data=None):
"""Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
@classmethod
def not_modified(cls, errors=None):
"""Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '304 Not Modified'
return cls(304, None, errors).to_json
@classmethod
def bad_request(cls, errors=None):
"""Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '400 Bad Request'
return cls(400, errors=errors).to_json
@classmethod
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json
@classmethod
def forbidden(cls, errors=None):
"""Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '403 Forbidden'
return cls(403, errors=errors).to_json
@classmethod
def not_found(cls, errors=None):
"""Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
@classmethod
def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
@classmethod
def not_implemented(cls, errors=None):
"""Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '501 Not Implemented'
return cls(501, None, errors).to_json
@classmethod
def service_unavailable(cls, errors=None):
"""Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '503 Service Unavailable'
return cls(503, None, errors).to_json
@property
|
agile4you/bottle-neck | bottle_neck/webapi.py | paginator | python | def paginator(limit, offset, record_count, base_uri, page_nav_tpl='&limit={}&offset={}'):
total_pages = int(math.ceil(record_count / limit))
next_cond = limit + offset <= record_count
prev_cond = offset >= limit
next_page = base_uri + page_nav_tpl.format(limit, offset + limit) if next_cond else None
prev_page = base_uri + page_nav_tpl.format(limit, offset - limit) if prev_cond else None
return OrderedDict([
('total_count', record_count),
('total_pages', total_pages),
('next_page', next_page),
('prev_page', prev_page)
]) | Compute pagination info for collection filtering.
Args:
limit (int): Collection filter limit.
offset (int): Collection filter offset.
record_count (int): Collection filter total record count.
base_uri (str): Collection filter base uri (without limit, offset)
page_nav_tpl (str): Pagination template.
Returns:
A mapping of pagination info. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/webapi.py#L36-L64 | null | # -*- coding: utf-8 -*-
"""Bottle.py API development utilities.
Provides some extra functionality for developing web API's with `bottle.py`.
"""
from __future__ import absolute_import
__author__ = 'pav'
__date__ = '2015-2-3'
__all__ = ['cors_enable_hook', 'strip_path_hook', 'paginator']
from bottle_neck import __version__
from collections import OrderedDict
import bottle
import math
version = tuple(map(int, __version__.split('.')))
def cors_enable_hook():
bottle.response.headers['Access-Control-Allow-Origin'] = '*'
bottle.response.headers['Access-Control-Allow-Headers'] = \
'Authorization, Credentials, X-Requested-With, Content-Type'
bottle.response.headers['Access-Control-Allow-Methods'] = \
'GET, PUT, POST, OPTIONS, DELETE'
def strip_path_hook():
"""Ignore trailing slashes.
"""
bottle.request.environ['PATH_INFO'] = \
bottle.request.environ['PATH_INFO'].rstrip('/')
|
agile4you/bottle-neck | bottle_neck/cbv.py | cached_classproperty | python | def cached_classproperty(fun):
@functools.wraps(fun)
def get(cls):
try:
return cls.__cache[fun]
except AttributeError:
cls.__cache = {}
except KeyError: # pragma: no cover
pass
ret = cls.__cache[fun] = fun(cls)
return ret
return classproperty(get) | A memorization decorator for class properties.
It implements the above `classproperty` decorator, with
the difference that the function result is computed and attached
to class as direct attribute. (Lazy loading and caching.) | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/cbv.py#L92-L109 | [
"def classproperty(func):\n \"\"\"classproperty decorator.\n Using this decorator a class can have a property. Necessary for properties\n that don't need instance initialization. Works exactly the same as a\n normal property.\n\n Examples:\n >>> class MyClass(object):\n ... @classpr... | # -*- coding: utf-8 -*-
"""CBV for bottle.py application instances.
Provides a base class for creating class-based web handlers for bottle.py
application instances with application routing mechanism.
"""
from __future__ import absolute_import
__author__ = "Papavassiliou Vassilis"
__date__ = "2015-11-29"
__all__ = ['BaseHandler', 'HandlerMeta', 'route_method', 'plugin_method',
'HandlerError', 'HandlerHTTPMethodError', 'HandlerPluginError',
'BaseHandlerPlugin']
from bottle_neck import __version__
import bottle
import functools
import inspect
import six
import re
version = tuple(map(int, __version__.split('.')))
DEFAULT_ROUTES = ("get", "put", "post", "delete", "patch", "options")
CORS_ROUTES = ("put", "post", "delete", "patch")
PLUGIN_SCOPE = (
(False, 'plugins'),
(True, 'global_plugins')
)
class HandlerError(Exception):
"""Base module Exception class.
"""
pass
class HandlerHTTPMethodError(HandlerError):
"""Raises for invalid HTTP method declaration.
"""
pass
class HandlerPluginError(HandlerError):
"""Raises when a handler plugin error occurs.
"""
pass
class ClassPropertyDescriptor(object):
"""ClassProperty Descriptor class.
Straight up stolen from stack overflow Implements class level property
non-data descriptor.
"""
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, cls=None):
if cls is None: # pragma: no cover
cls = type(obj)
return self.fget.__get__(obj, cls)()
def classproperty(func):
"""classproperty decorator.
Using this decorator a class can have a property. Necessary for properties
that don't need instance initialization. Works exactly the same as a
normal property.
Examples:
>>> class MyClass(object):
... @classproperty
... def my_prop(self):
... return self.__name__ + ' class'
...
>>> MyClass.my_prop
'MyClass class'
"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
def plugin_method(*plugin_names):
"""Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True
"""
def wrapper(callable_obj):
for plugin_name in plugin_names:
if not hasattr(callable_obj, plugin_name):
setattr(callable_obj, plugin_name, True)
return callable_obj
return wrapper
def route_method(method_name, extra_part=False):
"""Custom handler routing decorator.
Signs a web handler callable with the http method as attribute.
Args:
method_name (str): HTTP method name (i.e GET, POST)
extra_part (bool): Indicates if wrapped callable name should be a part
of the actual endpoint.
Returns:
A wrapped handler callable.
examples:
>>> @route_method('GET')
... def method():
... return "Hello!"
...
>>> method.http_method
'GET'
>>> method.url_extra_part
None
"""
def wrapper(callable_obj):
if method_name.lower() not in DEFAULT_ROUTES:
raise HandlerHTTPMethodError(
'Invalid http method in method: {}'.format(method_name)
)
callable_obj.http_method = method_name.upper()
callable_obj.url_extra_part = callable_obj.__name__ if extra_part\
else None
return classmethod(callable_obj)
return wrapper
class BaseHandlerPlugin(object):
"""
"""
def __init__(self, callable_object, *args, **kwargs): # pragma: no cover
self._wrapped = callable_object
self._args = args
self._kwargs = kwargs
self.__doc__ = callable_object.__doc__
@cached_classproperty
def func_name(self):
cls_name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__name__)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', cls_name).lower()
def apply(self, *args, **kwargs): # pragma: no cover
raise NotImplementedError("Must Override `apply` method")
def __call__(self, *args, **kwargs): # pragma: no cover
return self.apply(*args, **kwargs)
class HandlerMeta(type):
"""BaseHandler metaclass
Provides meta functionality for ensuring that `http_method` don't require
an instance and test and that derived classes implements `Singleton`.
"""
_instances = {}
def __new__(mcs, name, bases, attrs):
http_methods = [attr for attr in attrs if
attr.lower() in DEFAULT_ROUTES]
for handler in http_methods:
attrs[handler] = classmethod(attrs[handler])
return super(HandlerMeta, mcs).__new__(mcs, name, bases, attrs)
def __call__(cls, *args):
"""Enforce Singleton class initiation
"""
if cls not in cls._instances:
cls._instances[cls] = super(
HandlerMeta, cls
).__call__(*args)
return cls._instances[cls]
@six.add_metaclass(HandlerMeta)
class BaseHandler(object):
"""Base Handler class for implementing Class-Based Web handler for bottle.
Subclass `BaseHandler` in order to its API for application routing, and
implement any of the known http method:
Class Attributes:
render (object): bottle.jinja2_render.
request (object): bottle.request
response (object): bottle.response
redirect (object):bottle.redirect
abort (object): bottle.abort
base_endpoint (str): The handler endpoint prefix.
cors_enabled (bool): Indicates if CORS is enabled.
plugins (dict): A key/value mapping of available plugins.
global_plugins (dict): A key/value mapping default applying plugins.
"""
render = bottle.jinja2_template
request = bottle.request
response = bottle.response
redirect = bottle.redirect
abort = bottle.abort
response_factory = None
base_endpoint = '/'
cors_enabled = True
plugins = dict()
global_plugins = dict()
@classmethod
def add_plugin(cls, plugin_callables, global_scope=False):
"""Register a new plugin in handler.
Args:
plugin_callables (list): A list of plugin callables.
global_scope (bool): Indicates The scope of the plugin.
Returns:
Class instance.
"""
repo = getattr(cls, dict(PLUGIN_SCOPE)[global_scope])
for plugin_callable in plugin_callables:
if hasattr(plugin_callable, 'func_name'):
repo[plugin_callable.func_name] = plugin_callable
else: # pragma: no cover
repo[plugin_callable.__name__] = plugin_callable
return cls
@classmethod
def register_app(cls, application):
"""Register class view in bottle application.
Args:
application (instance): A bottle.Bottle() instance.
Returns:
Class instance.
"""
if cls is BaseHandler:
raise HandlerError("Cant register a `BaseHandler` class instance")
routes = cls._get_http_members()
router = getattr(application, "route")
for func_name, func_callable in routes:
method_args = inspect.signature(func_callable).parameters
if hasattr(func_callable, 'http_method'):
http_method = func_callable.http_method
url_extra_part = func_callable.url_extra_part
else:
http_method = func_name
url_extra_part = None
applied_plugins = [pl for pl in cls.plugins
if hasattr(func_callable, pl)]
for plugin in applied_plugins: # pragma: no cover
try:
func_callable = cls.plugins[plugin](func_callable)
except TypeError as error:
raise HandlerPluginError(error.args)
for global_plugin in cls.global_plugins:
func_callable = cls.global_plugins[global_plugin](
func_callable
)
for entrypoint in cls._build_routes(method_args, url_extra_part):
router(entrypoint, method=[http_method.upper()])(func_callable)
if cls.cors_enabled:
cls_desc = cls.__doc__ or ''
options_data = {
"handler": {"name": cls.__name__, "desc": cls_desc.strip()},
"http_methods": [r[0] for r in routes]
}
router(
cls.base_endpoint,
method=["OPTIONS"]
)(lambda: options_data)
router(
"{}/<url:re:.+>".format(cls.base_endpoint).replace("//", "/"),
method=["OPTIONS"]
)(lambda url: options_data)
return cls
@classmethod
def _build_routes(cls, method_args, url_extra_part=None):
"""Create bottle route for a handler http method."""
prefix = '/{}'.format(url_extra_part) if url_extra_part else ''
endpoint = cls.base_endpoint + prefix
if not method_args:
return [endpoint]
endpoints = []
for args_list in cls._router_helper(method_args):
prefix = '/:' if args_list else ''
endpoints.append((endpoint + prefix + '/:'.join(args_list)).replace("//", '/'))
return endpoints
@classmethod
def _router_helper(cls, method_args):
"""Detect default Nullable method arguments and return
multiple routes per callable.
"""
fixed_params = [param for param in method_args
if method_args[param].default is not None]
nullable_params = [param for param in method_args if param not in fixed_params]
combinations = [fixed_params]
combinations += [combinations[-1] + [param] for param in nullable_params]
return combinations
@classmethod
def _get_http_members(cls):
"""Filter all `http` specific method from handler class.
"""
return [member for member in
inspect.getmembers(cls, predicate=inspect.ismethod)
if cls._check_http_member(member)]
@staticmethod
def _check_http_member(member):
"""Checks if a class method has HTTP info.
"""
return member[0].lower() in DEFAULT_ROUTES or \
hasattr(member[1], 'http_method')
if __name__ == '__main__': # pragma: no cover
import doctest
doctest.testmod()
|
agile4you/bottle-neck | bottle_neck/cbv.py | plugin_method | python | def plugin_method(*plugin_names):
def wrapper(callable_obj):
for plugin_name in plugin_names:
if not hasattr(callable_obj, plugin_name):
setattr(callable_obj, plugin_name, True)
return callable_obj
return wrapper | Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/cbv.py#L112-L138 | null | # -*- coding: utf-8 -*-
"""CBV for bottle.py application instances.
Provides a base class for creating class-based web handlers for bottle.py
application instances with application routing mechanism.
"""
from __future__ import absolute_import
__author__ = "Papavassiliou Vassilis"
__date__ = "2015-11-29"
__all__ = ['BaseHandler', 'HandlerMeta', 'route_method', 'plugin_method',
'HandlerError', 'HandlerHTTPMethodError', 'HandlerPluginError',
'BaseHandlerPlugin']
from bottle_neck import __version__
import bottle
import functools
import inspect
import six
import re
version = tuple(map(int, __version__.split('.')))
DEFAULT_ROUTES = ("get", "put", "post", "delete", "patch", "options")
CORS_ROUTES = ("put", "post", "delete", "patch")
PLUGIN_SCOPE = (
(False, 'plugins'),
(True, 'global_plugins')
)
class HandlerError(Exception):
"""Base module Exception class.
"""
pass
class HandlerHTTPMethodError(HandlerError):
"""Raises for invalid HTTP method declaration.
"""
pass
class HandlerPluginError(HandlerError):
"""Raises when a handler plugin error occurs.
"""
pass
class ClassPropertyDescriptor(object):
"""ClassProperty Descriptor class.
Straight up stolen from stack overflow Implements class level property
non-data descriptor.
"""
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, cls=None):
if cls is None: # pragma: no cover
cls = type(obj)
return self.fget.__get__(obj, cls)()
def classproperty(func):
"""classproperty decorator.
Using this decorator a class can have a property. Necessary for properties
that don't need instance initialization. Works exactly the same as a
normal property.
Examples:
>>> class MyClass(object):
... @classproperty
... def my_prop(self):
... return self.__name__ + ' class'
...
>>> MyClass.my_prop
'MyClass class'
"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
def cached_classproperty(fun):
"""A memorization decorator for class properties.
It implements the above `classproperty` decorator, with
the difference that the function result is computed and attached
to class as direct attribute. (Lazy loading and caching.)
"""
@functools.wraps(fun)
def get(cls):
try:
return cls.__cache[fun]
except AttributeError:
cls.__cache = {}
except KeyError: # pragma: no cover
pass
ret = cls.__cache[fun] = fun(cls)
return ret
return classproperty(get)
def route_method(method_name, extra_part=False):
"""Custom handler routing decorator.
Signs a web handler callable with the http method as attribute.
Args:
method_name (str): HTTP method name (i.e GET, POST)
extra_part (bool): Indicates if wrapped callable name should be a part
of the actual endpoint.
Returns:
A wrapped handler callable.
examples:
>>> @route_method('GET')
... def method():
... return "Hello!"
...
>>> method.http_method
'GET'
>>> method.url_extra_part
None
"""
def wrapper(callable_obj):
if method_name.lower() not in DEFAULT_ROUTES:
raise HandlerHTTPMethodError(
'Invalid http method in method: {}'.format(method_name)
)
callable_obj.http_method = method_name.upper()
callable_obj.url_extra_part = callable_obj.__name__ if extra_part\
else None
return classmethod(callable_obj)
return wrapper
class BaseHandlerPlugin(object):
"""
"""
def __init__(self, callable_object, *args, **kwargs): # pragma: no cover
self._wrapped = callable_object
self._args = args
self._kwargs = kwargs
self.__doc__ = callable_object.__doc__
@cached_classproperty
def func_name(self):
cls_name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__name__)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', cls_name).lower()
def apply(self, *args, **kwargs): # pragma: no cover
raise NotImplementedError("Must Override `apply` method")
def __call__(self, *args, **kwargs): # pragma: no cover
return self.apply(*args, **kwargs)
class HandlerMeta(type):
"""BaseHandler metaclass
Provides meta functionality for ensuring that `http_method` don't require
an instance and test and that derived classes implements `Singleton`.
"""
_instances = {}
def __new__(mcs, name, bases, attrs):
http_methods = [attr for attr in attrs if
attr.lower() in DEFAULT_ROUTES]
for handler in http_methods:
attrs[handler] = classmethod(attrs[handler])
return super(HandlerMeta, mcs).__new__(mcs, name, bases, attrs)
def __call__(cls, *args):
"""Enforce Singleton class initiation
"""
if cls not in cls._instances:
cls._instances[cls] = super(
HandlerMeta, cls
).__call__(*args)
return cls._instances[cls]
@six.add_metaclass(HandlerMeta)
class BaseHandler(object):
"""Base Handler class for implementing Class-Based Web handler for bottle.
Subclass `BaseHandler` in order to its API for application routing, and
implement any of the known http method:
Class Attributes:
render (object): bottle.jinja2_render.
request (object): bottle.request
response (object): bottle.response
redirect (object):bottle.redirect
abort (object): bottle.abort
base_endpoint (str): The handler endpoint prefix.
cors_enabled (bool): Indicates if CORS is enabled.
plugins (dict): A key/value mapping of available plugins.
global_plugins (dict): A key/value mapping default applying plugins.
"""
render = bottle.jinja2_template
request = bottle.request
response = bottle.response
redirect = bottle.redirect
abort = bottle.abort
response_factory = None
base_endpoint = '/'
cors_enabled = True
plugins = dict()
global_plugins = dict()
@classmethod
def add_plugin(cls, plugin_callables, global_scope=False):
"""Register a new plugin in handler.
Args:
plugin_callables (list): A list of plugin callables.
global_scope (bool): Indicates The scope of the plugin.
Returns:
Class instance.
"""
repo = getattr(cls, dict(PLUGIN_SCOPE)[global_scope])
for plugin_callable in plugin_callables:
if hasattr(plugin_callable, 'func_name'):
repo[plugin_callable.func_name] = plugin_callable
else: # pragma: no cover
repo[plugin_callable.__name__] = plugin_callable
return cls
@classmethod
def register_app(cls, application):
"""Register class view in bottle application.
Args:
application (instance): A bottle.Bottle() instance.
Returns:
Class instance.
"""
if cls is BaseHandler:
raise HandlerError("Cant register a `BaseHandler` class instance")
routes = cls._get_http_members()
router = getattr(application, "route")
for func_name, func_callable in routes:
method_args = inspect.signature(func_callable).parameters
if hasattr(func_callable, 'http_method'):
http_method = func_callable.http_method
url_extra_part = func_callable.url_extra_part
else:
http_method = func_name
url_extra_part = None
applied_plugins = [pl for pl in cls.plugins
if hasattr(func_callable, pl)]
for plugin in applied_plugins: # pragma: no cover
try:
func_callable = cls.plugins[plugin](func_callable)
except TypeError as error:
raise HandlerPluginError(error.args)
for global_plugin in cls.global_plugins:
func_callable = cls.global_plugins[global_plugin](
func_callable
)
for entrypoint in cls._build_routes(method_args, url_extra_part):
router(entrypoint, method=[http_method.upper()])(func_callable)
if cls.cors_enabled:
cls_desc = cls.__doc__ or ''
options_data = {
"handler": {"name": cls.__name__, "desc": cls_desc.strip()},
"http_methods": [r[0] for r in routes]
}
router(
cls.base_endpoint,
method=["OPTIONS"]
)(lambda: options_data)
router(
"{}/<url:re:.+>".format(cls.base_endpoint).replace("//", "/"),
method=["OPTIONS"]
)(lambda url: options_data)
return cls
@classmethod
def _build_routes(cls, method_args, url_extra_part=None):
"""Create bottle route for a handler http method."""
prefix = '/{}'.format(url_extra_part) if url_extra_part else ''
endpoint = cls.base_endpoint + prefix
if not method_args:
return [endpoint]
endpoints = []
for args_list in cls._router_helper(method_args):
prefix = '/:' if args_list else ''
endpoints.append((endpoint + prefix + '/:'.join(args_list)).replace("//", '/'))
return endpoints
@classmethod
def _router_helper(cls, method_args):
"""Detect default Nullable method arguments and return
multiple routes per callable.
"""
fixed_params = [param for param in method_args
if method_args[param].default is not None]
nullable_params = [param for param in method_args if param not in fixed_params]
combinations = [fixed_params]
combinations += [combinations[-1] + [param] for param in nullable_params]
return combinations
@classmethod
def _get_http_members(cls):
"""Filter all `http` specific method from handler class.
"""
return [member for member in
inspect.getmembers(cls, predicate=inspect.ismethod)
if cls._check_http_member(member)]
@staticmethod
def _check_http_member(member):
"""Checks if a class method has HTTP info.
"""
return member[0].lower() in DEFAULT_ROUTES or \
hasattr(member[1], 'http_method')
if __name__ == '__main__': # pragma: no cover
import doctest
doctest.testmod()
|
agile4you/bottle-neck | bottle_neck/cbv.py | route_method | python | def route_method(method_name, extra_part=False):
def wrapper(callable_obj):
if method_name.lower() not in DEFAULT_ROUTES:
raise HandlerHTTPMethodError(
'Invalid http method in method: {}'.format(method_name)
)
callable_obj.http_method = method_name.upper()
callable_obj.url_extra_part = callable_obj.__name__ if extra_part\
else None
return classmethod(callable_obj)
return wrapper | Custom handler routing decorator.
Signs a web handler callable with the http method as attribute.
Args:
method_name (str): HTTP method name (i.e GET, POST)
extra_part (bool): Indicates if wrapped callable name should be a part
of the actual endpoint.
Returns:
A wrapped handler callable.
examples:
>>> @route_method('GET')
... def method():
... return "Hello!"
...
>>> method.http_method
'GET'
>>> method.url_extra_part
None | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/cbv.py#L141-L175 | null | # -*- coding: utf-8 -*-
"""CBV for bottle.py application instances.
Provides a base class for creating class-based web handlers for bottle.py
application instances with application routing mechanism.
"""
from __future__ import absolute_import
__author__ = "Papavassiliou Vassilis"
__date__ = "2015-11-29"
__all__ = ['BaseHandler', 'HandlerMeta', 'route_method', 'plugin_method',
'HandlerError', 'HandlerHTTPMethodError', 'HandlerPluginError',
'BaseHandlerPlugin']
from bottle_neck import __version__
import bottle
import functools
import inspect
import six
import re
version = tuple(map(int, __version__.split('.')))
DEFAULT_ROUTES = ("get", "put", "post", "delete", "patch", "options")
CORS_ROUTES = ("put", "post", "delete", "patch")
PLUGIN_SCOPE = (
(False, 'plugins'),
(True, 'global_plugins')
)
class HandlerError(Exception):
"""Base module Exception class.
"""
pass
class HandlerHTTPMethodError(HandlerError):
"""Raises for invalid HTTP method declaration.
"""
pass
class HandlerPluginError(HandlerError):
"""Raises when a handler plugin error occurs.
"""
pass
class ClassPropertyDescriptor(object):
"""ClassProperty Descriptor class.
Straight up stolen from stack overflow Implements class level property
non-data descriptor.
"""
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, cls=None):
if cls is None: # pragma: no cover
cls = type(obj)
return self.fget.__get__(obj, cls)()
def classproperty(func):
"""classproperty decorator.
Using this decorator a class can have a property. Necessary for properties
that don't need instance initialization. Works exactly the same as a
normal property.
Examples:
>>> class MyClass(object):
... @classproperty
... def my_prop(self):
... return self.__name__ + ' class'
...
>>> MyClass.my_prop
'MyClass class'
"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
def cached_classproperty(fun):
"""A memorization decorator for class properties.
It implements the above `classproperty` decorator, with
the difference that the function result is computed and attached
to class as direct attribute. (Lazy loading and caching.)
"""
@functools.wraps(fun)
def get(cls):
try:
return cls.__cache[fun]
except AttributeError:
cls.__cache = {}
except KeyError: # pragma: no cover
pass
ret = cls.__cache[fun] = fun(cls)
return ret
return classproperty(get)
def plugin_method(*plugin_names):
"""Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True
"""
def wrapper(callable_obj):
for plugin_name in plugin_names:
if not hasattr(callable_obj, plugin_name):
setattr(callable_obj, plugin_name, True)
return callable_obj
return wrapper
class BaseHandlerPlugin(object):
    """Base class for per-handler plugins that wrap a handler callable."""

    def __init__(self, callable_object, *args, **kwargs):  # pragma: no cover
        # Keep the wrapped callable plus any extra wiring arguments, and
        # mirror its docstring so introspection still reads naturally.
        self._wrapped = callable_object
        self._args = args
        self._kwargs = kwargs
        self.__doc__ = callable_object.__doc__

    @cached_classproperty
    def func_name(self):
        # CamelCase class name -> snake_case registry key (computed once
        # per class thanks to `cached_classproperty`).
        partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__name__)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower()

    def apply(self, *args, **kwargs):  # pragma: no cover
        # Subclasses implement the actual wrapping behaviour here.
        raise NotImplementedError("Must Override `apply` method")

    def __call__(self, *args, **kwargs):  # pragma: no cover
        return self.apply(*args, **kwargs)
class HandlerMeta(type):
    """BaseHandler metaclass.

    Promotes HTTP-verb methods to classmethods (so they don't require an
    instance) and enforces singleton instantiation for derived classes.
    """

    _instances = {}

    def __new__(mcs, name, bases, attrs):
        # Any attribute whose lowercased name is a known HTTP route verb
        # becomes a classmethod.
        for attr_name in list(attrs):
            if attr_name.lower() in DEFAULT_ROUTES:
                attrs[attr_name] = classmethod(attrs[attr_name])
        return super(HandlerMeta, mcs).__new__(mcs, name, bases, attrs)

    def __call__(cls, *args):
        """Enforce Singleton class initiation."""
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(HandlerMeta, cls).__call__(*args)
            cls._instances[cls] = instance
            return instance
@six.add_metaclass(HandlerMeta)
class BaseHandler(object):
    """Base Handler class for implementing Class-Based Web handler for bottle.
    Subclass `BaseHandler` in order to its API for application routing, and
    implement any of the known http method:
    Class Attributes:
        render (object): bottle.jinja2_render.
        request (object): bottle.request
        response (object): bottle.response
        redirect (object):bottle.redirect
        abort (object): bottle.abort
        base_endpoint (str): The handler endpoint prefix.
        cors_enabled (bool): Indicates if CORS is enabled.
        plugins (dict): A key/value mapping of available plugins.
        global_plugins (dict): A key/value mapping default applying plugins.
    """
    # Shortcuts to bottle primitives so subclasses never import bottle directly.
    render = bottle.jinja2_template
    request = bottle.request
    response = bottle.response
    redirect = bottle.redirect
    abort = bottle.abort
    response_factory = None
    base_endpoint = '/'
    cors_enabled = True
    # NOTE(review): these class-level dicts are shared by every subclass
    # (they are defined once here, not per-subclass).
    plugins = dict()
    global_plugins = dict()
    @classmethod
    def add_plugin(cls, plugin_callables, global_scope=False):
        """Register a new plugin in handler.
        Args:
            plugin_callables (list): A list of plugin callables.
            global_scope (bool): Indicates The scope of the plugin.
        Returns:
            Class instance.
        """
        # PLUGIN_SCOPE (module-level) presumably maps the bool to the target
        # attribute name ('plugins' / 'global_plugins') -- confirm upstream.
        repo = getattr(cls, dict(PLUGIN_SCOPE)[global_scope])
        for plugin_callable in plugin_callables:
            if hasattr(plugin_callable, 'func_name'):
                # BaseHandlerPlugin subclasses expose a snake_case `func_name`.
                repo[plugin_callable.func_name] = plugin_callable
            else:  # pragma: no cover
                # Plain callables are keyed by their function name.
                repo[plugin_callable.__name__] = plugin_callable
        return cls
    @classmethod
    def register_app(cls, application):
        """Register class view in bottle application.
        Args:
            application (instance): A bottle.Bottle() instance.
        Returns:
            Class instance.
        """
        if cls is BaseHandler:
            raise HandlerError("Cant register a `BaseHandler` class instance")
        routes = cls._get_http_members()
        router = getattr(application, "route")
        for func_name, func_callable in routes:
            # Method parameters drive the generated URL patterns below.
            method_args = inspect.signature(func_callable).parameters
            if hasattr(func_callable, 'http_method'):
                # Method was decorated with explicit HTTP metadata.
                http_method = func_callable.http_method
                url_extra_part = func_callable.url_extra_part
            else:
                # Fall back to the method name itself as the HTTP verb.
                http_method = func_name
                url_extra_part = None
            # Per-handler plugins apply only when the method was signed with
            # the plugin's name (see `plugin_method`).
            applied_plugins = [pl for pl in cls.plugins
                               if hasattr(func_callable, pl)]
            for plugin in applied_plugins:  # pragma: no cover
                try:
                    func_callable = cls.plugins[plugin](func_callable)
                except TypeError as error:
                    raise HandlerPluginError(error.args)
            # Global plugins wrap every handler method unconditionally.
            for global_plugin in cls.global_plugins:
                func_callable = cls.global_plugins[global_plugin](
                    func_callable
                )
            for entrypoint in cls._build_routes(method_args, url_extra_part):
                router(entrypoint, method=[http_method.upper()])(func_callable)
        if cls.cors_enabled:
            # Auto-register OPTIONS endpoints describing the handler, both at
            # the base endpoint and for any sub-path under it.
            cls_desc = cls.__doc__ or ''
            options_data = {
                "handler": {"name": cls.__name__, "desc": cls_desc.strip()},
                "http_methods": [r[0] for r in routes]
            }
            router(
                cls.base_endpoint,
                method=["OPTIONS"]
            )(lambda: options_data)
            router(
                "{}/<url:re:.+>".format(cls.base_endpoint).replace("//", "/"),
                method=["OPTIONS"]
            )(lambda url: options_data)
        return cls
    @classmethod
    def _build_routes(cls, method_args, url_extra_part=None):
        """Create bottle route for a handler http method."""
        prefix = '/{}'.format(url_extra_part) if url_extra_part else ''
        endpoint = cls.base_endpoint + prefix
        if not method_args:
            # No parameters: a single static route.
            return [endpoint]
        endpoints = []
        # One route per argument combination, joining parameters with
        # old-style bottle ':param' tokens; '//' is collapsed afterwards.
        for args_list in cls._router_helper(method_args):
            prefix = '/:' if args_list else ''
            endpoints.append((endpoint + prefix + '/:'.join(args_list)).replace("//", '/'))
        return endpoints
    @classmethod
    def _router_helper(cls, method_args):
        """Detect default Nullable method arguments and return
        multiple routes per callable.
        """
        # A parameter counts as fixed unless its default is literally None
        # (parameters with NO default have `Parameter.empty`, which is not
        # None, so they are fixed/mandatory too).
        fixed_params = [param for param in method_args
                        if method_args[param].default is not None]
        nullable_params = [param for param in method_args if param not in fixed_params]
        # Start with the mandatory params, then progressively append each
        # nullable one -> one route per "how many optionals are present".
        combinations = [fixed_params]
        combinations += [combinations[-1] + [param] for param in nullable_params]
        return combinations
    @classmethod
    def _get_http_members(cls):
        """Filter all `http` specific method from handler class.
        """
        return [member for member in
                inspect.getmembers(cls, predicate=inspect.ismethod)
                if cls._check_http_member(member)]
    @staticmethod
    def _check_http_member(member):
        """Checks if a class method has HTTP info.
        """
        # `member` is an (name, callable) pair from inspect.getmembers.
        return member[0].lower() in DEFAULT_ROUTES or \
            hasattr(member[1], 'http_method')
if __name__ == '__main__':  # pragma: no cover
    # Run the module doctests when executed directly.
    from doctest import testmod
    testmod()
|
agile4you/bottle-neck | bottle_neck/plugins.py | WrapErrorPlugin.apply | python | def apply(self, callback, context): # pragma: no cover
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except bottle.HTTPError as error:
return self.error_wrapper.from_status(
status_line=error.status_line,
msg=error.body
)
return wrapper | Apply the HTTPError wrapper to the callback. | train | https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/plugins.py#L61-L74 | null | class WrapErrorPlugin(BasePlugin):
"""Middleware class that catches `bottle.HTTPError` exceptions and returns
default HTTP status code 200 using `bottle_neck.response.WSResponse` class
for error wrapping.
"""
def __init__(self, keyword, error_wrapper_cls): # pragma: no cover
self.error_wrapper = error_wrapper_cls
self.keyword = keyword
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.