repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
anlutro/russell
|
russell/engine.py
|
BlogEngine.add_pages
|
python
|
def add_pages(self, path='pages'):
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
|
Look through a directory for markdown files and add them as pages.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L98-L109
|
[
"def _listfiles(root_dir):\n\tresults = set()\n\n\tfor root, _, files in os.walk(root_dir):\n\t\tfor file in files:\n\t\t\tresults.add(os.path.join(root, file))\n\n\treturn results\n",
"def add_pages(self, pages, resort=True):\n\tself.pages.extend(pages)\n\tif resort:\n\t\tself.pages.sort()\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.add_posts
|
python
|
def add_posts(self, path='posts'):
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
|
Look through a directory for markdown files and add them as posts.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L111-L119
|
[
"def _listfiles(root_dir):\n\tresults = set()\n\n\tfor root, _, files in os.walk(root_dir):\n\t\tfor file in files:\n\t\t\tresults.add(os.path.join(root, file))\n\n\treturn results\n",
"def add_posts(self, posts, resort=True):\n\tself.posts.extend(posts)\n\tfor post in posts:\n\t\tfor tag in post.tags:\n\t\t\tif tag not in self.tags:\n\t\t\t\tself.tags.append(tag)\n\n\tif resort:\n\t\tself.tags.sort()\n\t\tself.posts.sort()\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.copy_assets
|
python
|
def copy_assets(self, path='assets'):
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
|
Copy assets into the destination directory.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L121-L132
|
[
"def _get_dist_path(self, path, directory=None):\n\tif isinstance(path, str):\n\t\tpath = [path]\n\tif directory:\n\t\tpath.insert(0, directory)\n\treturn os.path.join(self.root_path, 'dist', *path)\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.add_asset_hashes
|
python
|
def add_asset_hashes(self, path='dist/assets'):
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
|
Scan through a directory and add hashes for each file found.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L134-L142
|
[
"def _listfiles(root_dir):\n\tresults = set()\n\n\tfor root, _, files in os.walk(root_dir):\n\t\tfor file in files:\n\t\t\tresults.add(os.path.join(root, file))\n\n\treturn results\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.get_posts
|
python
|
def get_posts(self, num=None, tag=None, private=False):
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
|
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L144-L166
| null |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.generate_pages
|
python
|
def generate_pages(self):
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
|
Generate HTML out of the pages added to the blog.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L180-L185
|
[
"def generate_page(self, path, template, **kwargs):\n\t\"\"\"\n\tGenerate the HTML for a single page. You usually don't need to call this\n\tmethod manually, it is used by a lot of other, more end-user friendly\n\tmethods.\n\n\tArgs:\n\t path (str): Where to place the page relative to the root URL. Usually\n\t something like \"index\", \"about-me\", \"projects/example\", etc.\n\t template (str): Which jinja template to use to render the page.\n\t **kwargs: Kwargs will be passed on to the jinja template. Also, if\n\t the `page` kwarg is passed, its directory attribute will be\n\t prepended to the path.\n\t\"\"\"\n\tdirectory = None\n\tif kwargs.get('page'):\n\t\tdirectory = kwargs['page'].dir\n\n\tpath = self._get_dist_path(path, directory=directory)\n\tif not path.endswith('.html'):\n\t\tpath = path + '.html'\n\n\tif not os.path.isdir(os.path.dirname(path)):\n\t\tos.makedirs(os.path.dirname(path))\n\n\thtml = self._get_template(template).render(**kwargs)\n\n\twith open(path, 'w+') as file:\n\t\tfile.write(html)\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.generate_posts
|
python
|
def generate_posts(self):
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
|
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L187-L198
|
[
"def generate_page(self, path, template, **kwargs):\n\t\"\"\"\n\tGenerate the HTML for a single page. You usually don't need to call this\n\tmethod manually, it is used by a lot of other, more end-user friendly\n\tmethods.\n\n\tArgs:\n\t path (str): Where to place the page relative to the root URL. Usually\n\t something like \"index\", \"about-me\", \"projects/example\", etc.\n\t template (str): Which jinja template to use to render the page.\n\t **kwargs: Kwargs will be passed on to the jinja template. Also, if\n\t the `page` kwarg is passed, its directory attribute will be\n\t prepended to the path.\n\t\"\"\"\n\tdirectory = None\n\tif kwargs.get('page'):\n\t\tdirectory = kwargs['page'].dir\n\n\tpath = self._get_dist_path(path, directory=directory)\n\tif not path.endswith('.html'):\n\t\tpath = path + '.html'\n\n\tif not os.path.isdir(os.path.dirname(path)):\n\t\tos.makedirs(os.path.dirname(path))\n\n\thtml = self._get_template(template).render(**kwargs)\n\n\twith open(path, 'w+') as file:\n\t\tfile.write(html)\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.generate_tags
|
python
|
def generate_tags(self):
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
|
Generate one HTML page for each tag, each containing all posts that
match that tag.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L200-L208
|
[
"def get_posts(self, num=None, tag=None, private=False):\n\t\"\"\"\n\tGet all the posts added to the blog.\n\n\tArgs:\n\t num (int): Optional. If provided, only return N posts (sorted by date,\n\t most recent first).\n\t tag (Tag): Optional. If provided, only return posts that have a\n\t specific tag.\n\t private (bool): By default (if False), private posts are not included.\n\t If set to True, private posts will also be included.\n\t\"\"\"\n\tposts = self.posts\n\n\tif not private:\n\t\tposts = [post for post in posts if post.public]\n\n\tif tag:\n\t\tposts = [post for post in posts if tag in post.tags]\n\n\tif num:\n\t\treturn posts[:num]\n\treturn posts\n",
"def generate_page(self, path, template, **kwargs):\n\t\"\"\"\n\tGenerate the HTML for a single page. You usually don't need to call this\n\tmethod manually, it is used by a lot of other, more end-user friendly\n\tmethods.\n\n\tArgs:\n\t path (str): Where to place the page relative to the root URL. Usually\n\t something like \"index\", \"about-me\", \"projects/example\", etc.\n\t template (str): Which jinja template to use to render the page.\n\t **kwargs: Kwargs will be passed on to the jinja template. Also, if\n\t the `page` kwarg is passed, its directory attribute will be\n\t prepended to the path.\n\t\"\"\"\n\tdirectory = None\n\tif kwargs.get('page'):\n\t\tdirectory = kwargs['page'].dir\n\n\tpath = self._get_dist_path(path, directory=directory)\n\tif not path.endswith('.html'):\n\t\tpath = path + '.html'\n\n\tif not os.path.isdir(os.path.dirname(path)):\n\t\tos.makedirs(os.path.dirname(path))\n\n\thtml = self._get_template(template).render(**kwargs)\n\n\twith open(path, 'w+') as file:\n\t\tfile.write(html)\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.generate_page
|
python
|
def generate_page(self, path, template, **kwargs):
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
|
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L210-L238
|
[
"def _get_dist_path(self, path, directory=None):\n\tif isinstance(path, str):\n\t\tpath = [path]\n\tif directory:\n\t\tpath.insert(0, directory)\n\treturn os.path.join(self.root_path, 'dist', *path)\n",
"def _get_template(self, template):\n\tif isinstance(template, str):\n\t\ttemplate = self.jinja.get_template(template)\n\treturn template\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.generate_index
|
python
|
def generate_index(self, num_posts=5):
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
|
Generate the front page, aka index.html.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L240-L245
|
[
"def get_posts(self, num=None, tag=None, private=False):\n\t\"\"\"\n\tGet all the posts added to the blog.\n\n\tArgs:\n\t num (int): Optional. If provided, only return N posts (sorted by date,\n\t most recent first).\n\t tag (Tag): Optional. If provided, only return posts that have a\n\t specific tag.\n\t private (bool): By default (if False), private posts are not included.\n\t If set to True, private posts will also be included.\n\t\"\"\"\n\tposts = self.posts\n\n\tif not private:\n\t\tposts = [post for post in posts if post.public]\n\n\tif tag:\n\t\tposts = [post for post in posts if tag in post.tags]\n\n\tif num:\n\t\treturn posts[:num]\n\treturn posts\n",
"def generate_page(self, path, template, **kwargs):\n\t\"\"\"\n\tGenerate the HTML for a single page. You usually don't need to call this\n\tmethod manually, it is used by a lot of other, more end-user friendly\n\tmethods.\n\n\tArgs:\n\t path (str): Where to place the page relative to the root URL. Usually\n\t something like \"index\", \"about-me\", \"projects/example\", etc.\n\t template (str): Which jinja template to use to render the page.\n\t **kwargs: Kwargs will be passed on to the jinja template. Also, if\n\t the `page` kwarg is passed, its directory attribute will be\n\t prepended to the path.\n\t\"\"\"\n\tdirectory = None\n\tif kwargs.get('page'):\n\t\tdirectory = kwargs['page'].dir\n\n\tpath = self._get_dist_path(path, directory=directory)\n\tif not path.endswith('.html'):\n\t\tpath = path + '.html'\n\n\tif not os.path.isdir(os.path.dirname(path)):\n\t\tos.makedirs(os.path.dirname(path))\n\n\thtml = self._get_template(template).render(**kwargs)\n\n\twith open(path, 'w+') as file:\n\t\tfile.write(html)\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
"""
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.generate_rss
|
python
|
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
|
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L253-L268
|
[
"def get_rss_feed(blog, only_excerpt=True, https=False):\n\tgenerator = FeedGenerator()\n\n\troot_href = schema_url(blog.root_url, https)\n\tgenerator.id(root_href)\n\tgenerator.link(href=root_href, rel='alternate')\n\tgenerator.title(blog.site_title)\n\tgenerator.subtitle(blog.site_desc or blog.site_title)\n\n\tfor post in blog.get_posts():\n\t\tif only_excerpt:\n\t\t\tread_more = 'Read the full article at <a href=\"%s\" target=\"_blank\">%s</a>' % (post.url, post.url)\n\t\t\tbody = '<p>%s</p><p>%s</p>' % (post.excerpt, read_more)\n\t\telse:\n\t\t\tbody = post.body\n\n\t\tpost_href = schema_url(post.url, https=https)\n\n\t\tentry = generator.add_entry()\n\t\tentry.id(post_href)\n\t\tentry.link(href=post_href, rel='alternate')\n\t\tentry.title(post.title)\n\t\tentry.description(body)\n\t\tentry.published(post.pubdate)\n\n\treturn generator\n",
"def _get_dist_path(self, path, directory=None):\n\tif isinstance(path, str):\n\t\tpath = [path]\n\tif directory:\n\t\tpath.insert(0, directory)\n\treturn os.path.join(self.root_path, 'dist', *path)\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
"""
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
"""
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
if isinstance(template, str):
template = self.jinja.get_template(template)
return template
def generate_pages(self):
"""
Generate HTML out of the pages added to the blog.
"""
for page in self.pages:
self.generate_page(page.slug, template='page.html.jinja', page=page)
def generate_posts(self):
"""
Generate single-post HTML files out of posts added to the blog. Will not
generate front page, archives or tag files - those have to be generated
separately.
"""
for post in self.posts:
self.generate_page(
['posts', post.slug],
template='post.html.jinja',
post=post,
)
def generate_tags(self):
"""
Generate one HTML page for each tag, each containing all posts that
match that tag.
"""
for tag in self.tags:
posts = self.get_posts(tag=tag, private=True)
self.generate_page(['tags', tag.slug],
template='archive.html.jinja', posts=posts)
def generate_page(self, path, template, **kwargs):
"""
Generate the HTML for a single page. You usually don't need to call this
method manually, it is used by a lot of other, more end-user friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
"""
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if not path.endswith('.html'):
path = path + '.html'
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
def generate_index(self, num_posts=5):
"""
Generate the front page, aka index.html.
"""
posts = self.get_posts(num=num_posts)
self.generate_page('index', template='index.html.jinja', posts=posts)
def generate_archive(self):
"""
Generate the archive HTML page.
"""
self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_sitemap(self, path='sitemap.xml', https=False):
"""
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
"""
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
def write_file(self, path, contents):
"""
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
"""
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.generate_sitemap
|
python
|
def generate_sitemap(self, path='sitemap.xml', https=False):
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
|
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L270-L281
|
[
"def generate_sitemap(blog, https=True):\n\treturn SitemapGenerator(blog, https).generate_sitemap()\n",
"def write_file(self, path, contents):\n\t\"\"\"\n\tWrite a file of any type to the destination path. Useful for files like\n\trobots.txt, manifest.json, and so on.\n\n\tArgs:\n\t path (str): The name of the file to write to.\n\t contents (str or bytes): The contents to write.\n\t\"\"\"\n\tpath = self._get_dist_path(path)\n\tif not os.path.isdir(os.path.dirname(path)):\n\t\tos.makedirs(os.path.dirname(path))\n\tif isinstance(contents, bytes):\n\t\tmode = 'wb+'\n\telse:\n\t\tmode = 'w'\n\twith open(path, mode) as file:\n\t\tfile.write(contents)\n"
] |
class BlogEngine:
"""
The main instance that contains blog configuration and content, as well as
generating end results.
"""
def __init__(self, root_path, root_url, site_title, site_desc=None):
"""
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website.
"""
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self.cm.posts
self.tags = self.cm.tags
self.asset_hash = {}
self.jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
)
self.jinja.globals.update({
'a': make_link,
'asset_hash': self.asset_hash,
'asset_url': self.get_asset_url,
'now': datetime.now(),
'root_url': self.root_url,
'site_description': self.site_desc,
'site_title': self.site_title,
'tags': self.tags,
})
def get_asset_url(self, path):
"""
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
path (str): Path to the file, relative to your "assets" directory.
"""
url = self.root_url + '/assets/' + path
if path in self.asset_hash:
url += '?' + self.asset_hash[path]
return url
def add_pages(self, path='pages'):
"""
Look through a directory for markdown files and add them as pages.
"""
pages_path = os.path.join(self.root_path, path)
pages = []
for file in _listfiles(pages_path):
page_dir = os.path.relpath(os.path.dirname(file), pages_path)
if page_dir == '.':
page_dir = None
pages.append(self.cm.Page.from_file(file, directory=page_dir))
self.cm.add_pages(pages)
def add_posts(self, path='posts'):
"""
Look through a directory for markdown files and add them as posts.
"""
path = os.path.join(self.root_path, path)
self.cm.add_posts([
self.cm.Post.from_file(file)
for file in _listfiles(path)
])
def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
def add_asset_hashes(self, path='dist/assets'):
"""
Scan through a directory and add hashes for each file found.
"""
for fullpath in _listfiles(os.path.join(self.root_path, path)):
relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
self.asset_hash[relpath] = md5sum
def get_posts(self, num=None, tag=None, private=False):
    """
    Get posts added to the blog, optionally filtered.

    Args:
        num (int): Optional. If provided, return at most N posts (the
            post list is kept sorted by date, most recent first).
        tag (Tag): Optional. If provided, only return posts that have
            this tag.
        private (bool): If False (the default), private posts are
            excluded; if True they are included.

    Returns:
        list: The matching posts.
    """
    posts = self.posts
    if not private:
        posts = [post for post in posts if post.public]
    if tag:
        posts = [post for post in posts if tag in post.tags]
    # Compare against None explicitly: num=0 should mean "no posts",
    # not fall through the falsy check and return everything.
    if num is not None:
        return posts[:num]
    return posts
def _get_dist_path(self, path, directory=None):
if isinstance(path, str):
path = [path]
if directory:
path.insert(0, directory)
return os.path.join(self.root_path, 'dist', *path)
def _get_template(self, template):
    """
    Resolve a template name to a jinja template object; objects that are
    already templates are passed through unchanged.
    """
    if not isinstance(template, str):
        return template
    return self.jinja.get_template(template)
def generate_pages(self):
    """
    Render every registered page to an HTML file under dist/.
    """
    for entry in self.pages:
        self.generate_page(entry.slug, template='page.html.jinja', page=entry)
def generate_posts(self):
    """
    Render a single-post HTML file for every post. The front page,
    archive and tag pages are generated separately.
    """
    for entry in self.posts:
        self.generate_page(
            ['posts', entry.slug],
            template='post.html.jinja',
            post=entry,
        )
def generate_tags(self):
    """
    Render one archive page per tag, listing every post carrying that
    tag (private posts included).
    """
    for tag in self.tags:
        tagged = self.get_posts(tag=tag, private=True)
        self.generate_page(['tags', tag.slug],
                           template='archive.html.jinja', posts=tagged)
def generate_page(self, path, template, **kwargs):
    """
    Render a single HTML file from a template. Usually called indirectly
    via the more specific generate_* methods.

    Args:
        path (str or list): Output location relative to the dist root,
            e.g. "index", "about-me", ["projects", "example"].
        template (str): Which jinja template to use to render the page.
        **kwargs: Passed through to the template. If a `page` kwarg is
            given, its directory attribute is prepended to the path.
    """
    directory = None
    page = kwargs.get('page')
    if page:
        directory = page.dir
    target = self._get_dist_path(path, directory=directory)
    if not target.endswith('.html'):
        target += '.html'
    parent = os.path.dirname(target)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    rendered = self._get_template(template).render(**kwargs)
    with open(target, 'w+') as file:
        file.write(rendered)
def generate_index(self, num_posts=5):
    """
    Render the front page (index.html) with the most recent posts.

    Args:
        num_posts (int): How many posts to show on the front page.
    """
    recent = self.get_posts(num=num_posts)
    self.generate_page('index', template='index.html.jinja', posts=recent)
def generate_archive(self):
    """
    Render the archive page listing every public post.
    """
    self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
    """
    Generate the RSS feed.

    Args:
        path (str): Where to save the RSS file. Make sure your jinja
            templates refer to the same path using <link>.
        only_excerpt (bool): If True (the default), include only each
            post's first paragraph plus a "read more" link instead of
            the full body.
        https (bool): If True, scheme-relative links (//example.com/...)
            are rendered as HTTPS; if False (the default), as HTTP.
    """
    feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
    feed.rss_file(self._get_dist_path(path))
def write_file(self, path, contents):
    """
    Write an arbitrary file (robots.txt, manifest.json, ...) into the
    dist directory.

    Args:
        path (str): The name of the file to write to.
        contents (str or bytes): The contents to write.
    """
    target = self._get_dist_path(path)
    parent = os.path.dirname(target)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    # Binary payloads need a binary mode; everything else is text.
    mode = 'wb+' if isinstance(contents, bytes) else 'w'
    with open(target, mode) as file:
        file.write(contents)
|
anlutro/russell
|
russell/engine.py
|
BlogEngine.write_file
|
python
|
def write_file(self, path, contents):
path = self._get_dist_path(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if isinstance(contents, bytes):
mode = 'wb+'
else:
mode = 'w'
with open(path, mode) as file:
file.write(contents)
|
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write.
|
train
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L283-L300
|
[
"def _get_dist_path(self, path, directory=None):\n\tif isinstance(path, str):\n\t\tpath = [path]\n\tif directory:\n\t\tpath.insert(0, directory)\n\treturn os.path.join(self.root_path, 'dist', *path)\n"
] |
class BlogEngine:
    """
    Central blog object: holds configuration and content, and renders the
    final site into the "dist" directory.
    """

    def __init__(self, root_path, root_url, site_title, site_desc=None):
        """
        Args:
            root_path (str): Full path to the directory which contains the
                posts, pages, templates etc. directories.
            root_url (str): The root URL of your website.
            site_title (str): The title of your website.
            site_desc (str): A subtitle or description of your website.
        """
        self.root_path = root_path
        self.root_url = root_url
        self.site_title = site_title
        self.site_desc = site_desc
        self.cm = russell.content.ContentManager(root_url)  # pylint: disable=invalid-name
        self.pages = self.cm.pages
        self.posts = self.cm.posts
        self.tags = self.cm.tags
        self.asset_hash = {}
        self.jinja = jinja2.Environment(
            loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
        )
        self.jinja.globals.update({
            'a': make_link,
            'asset_hash': self.asset_hash,
            'asset_url': self.get_asset_url,
            'now': datetime.now(),
            'root_url': self.root_url,
            'site_description': self.site_desc,
            'site_title': self.site_title,
            'tags': self.tags,
        })

    def get_asset_url(self, path):
        """
        Build the public URL for an asset. If a hash has been registered
        for the path it is appended as a query string.

        Args:
            path (str): Path relative to your "assets" directory.
        """
        url = self.root_url + '/assets/' + path
        if path in self.asset_hash:
            url += '?' + self.asset_hash[path]
        return url

    def add_pages(self, path='pages'):
        """
        Scan a directory for markdown files and register them as pages.
        """
        pages_path = os.path.join(self.root_path, path)
        pages = []
        for file in _listfiles(pages_path):
            # Preserve the sub-directory a page lives in; '.' means it
            # sits directly in the pages directory.
            page_dir = os.path.relpath(os.path.dirname(file), pages_path)
            if page_dir == '.':
                page_dir = None
            pages.append(self.cm.Page.from_file(file, directory=page_dir))
        self.cm.add_pages(pages)

    def add_posts(self, path='posts'):
        """
        Scan a directory for markdown files and register them as posts.
        """
        path = os.path.join(self.root_path, path)
        self.cm.add_posts([
            self.cm.Post.from_file(file)
            for file in _listfiles(path)
        ])

    def copy_assets(self, path='assets'):
        """
        Copy asset files into the destination directory.
        """
        path = os.path.join(self.root_path, path)
        for root, _, files in os.walk(path):
            for file in files:
                fullpath = os.path.join(root, file)
                relpath = os.path.relpath(fullpath, path)
                copy_to = self._get_dist_path(relpath, directory='assets')
                LOG.debug('copying %r to %r', fullpath, copy_to)
                shutil.copyfile(fullpath, copy_to)

    def add_asset_hashes(self, path='dist/assets'):
        """
        Scan a directory and record an MD5 hash for every file found.
        """
        for fullpath in _listfiles(os.path.join(self.root_path, path)):
            relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
            # Context manager closes the handle promptly; the previous
            # open(...).read() leaked the descriptor until GC.
            with open(fullpath, 'rb') as file:
                md5sum = hashlib.md5(file.read()).hexdigest()
            LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
            self.asset_hash[relpath] = md5sum

    def get_posts(self, num=None, tag=None, private=False):
        """
        Get posts added to the blog, optionally filtered.

        Args:
            num (int): Optional. Return at most N posts (the list is kept
                sorted by date, most recent first).
            tag (Tag): Optional. Only return posts that have this tag.
            private (bool): If False (the default), private posts are
                excluded; if True they are included.
        """
        posts = self.posts
        if not private:
            posts = [post for post in posts if post.public]
        if tag:
            posts = [post for post in posts if tag in post.tags]
        # Explicit None check so num=0 yields an empty list rather than
        # falling through the falsy check and returning everything.
        if num is not None:
            return posts[:num]
        return posts

    def _get_dist_path(self, path, directory=None):
        """Build an absolute path inside the "dist" output directory."""
        if isinstance(path, str):
            path = [path]
        else:
            # Copy so the caller's list is not mutated by the insert.
            path = list(path)
        if directory:
            path.insert(0, directory)
        return os.path.join(self.root_path, 'dist', *path)

    def _get_template(self, template):
        """Resolve a template name to a jinja template object."""
        if isinstance(template, str):
            template = self.jinja.get_template(template)
        return template

    def generate_pages(self):
        """
        Render every registered page to an HTML file.
        """
        for page in self.pages:
            self.generate_page(page.slug, template='page.html.jinja', page=page)

    def generate_posts(self):
        """
        Render a single-post HTML file for every post. Front page,
        archive and tag pages are generated separately.
        """
        for post in self.posts:
            self.generate_page(
                ['posts', post.slug],
                template='post.html.jinja',
                post=post,
            )

    def generate_tags(self):
        """
        Render one archive page per tag, listing every post carrying
        that tag (private posts included).
        """
        for tag in self.tags:
            posts = self.get_posts(tag=tag, private=True)
            self.generate_page(['tags', tag.slug],
                               template='archive.html.jinja', posts=posts)

    def generate_page(self, path, template, **kwargs):
        """
        Render a single HTML file from a template.

        Args:
            path (str or list): Output location relative to the dist
                root, e.g. "index", "about-me", ["projects", "example"].
            template (str): Which jinja template to use.
            **kwargs: Passed through to the template. If a `page` kwarg
                is given, its directory attribute is prepended to path.
        """
        directory = None
        if kwargs.get('page'):
            directory = kwargs['page'].dir
        path = self._get_dist_path(path, directory=directory)
        if not path.endswith('.html'):
            path = path + '.html'
        if not os.path.isdir(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        html = self._get_template(template).render(**kwargs)
        with open(path, 'w+') as file:
            file.write(html)

    def generate_index(self, num_posts=5):
        """
        Render the front page, aka index.html.
        """
        posts = self.get_posts(num=num_posts)
        self.generate_page('index', template='index.html.jinja', posts=posts)

    def generate_archive(self):
        """
        Render the archive HTML page with every public post.
        """
        self.generate_page('archive', template='archive.html.jinja', posts=self.get_posts())

    def generate_rss(self, path='rss.xml', only_excerpt=True, https=False):
        """
        Generate the RSS feed.

        Args:
            path (str): Where to save the RSS file. Make sure your jinja
                templates refer to the same path using <link>.
            only_excerpt (bool): If True (the default), include only the
                first paragraph of each post plus a "read more" link.
            https (bool): If True, scheme-relative links are rendered as
                HTTPS; if False (the default), as plain HTTP.
        """
        feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
        feed.rss_file(self._get_dist_path(path))

    def generate_sitemap(self, path='sitemap.xml', https=False):
        """
        Generate an XML sitemap.

        Args:
            path (str): The name of the file to write to.
            https (bool): If True, scheme-relative links are rendered as
                HTTPS; if False (the default), as plain HTTP.
        """
        sitemap = russell.sitemap.generate_sitemap(self, https=https)
        self.write_file(path, sitemap)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
verify_file_exists
|
python
|
def verify_file_exists(file_name, file_location):
return __os.path.isfile(__os.path.join(file_location, file_name))
|
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L77-L87
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for persistent data storage       #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def file_name_increase(file_name, file_location):
    """
    Find a non-colliding file name by appending an increasing counter.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns a good filename.
    """
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        # splitext copes with extension-less and multi-dot names; the old
        # two-way split('.') raised on "foo.tar.gz" and its fallback path
        # silently dropped the extension.
        name, file_extension = __os.path.splitext(file_name)
        file_name_temp = '%s-%i%s' % (name, add_one, file_extension)
        add_one += 1
    return file_name_temp
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Check whether a directory exists, optionally creating it.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If True, create the directory when it is missing

    Returns: boolean - whether the directory existed before the call. The
        original returned None whenever directory_create was True; now a
        boolean is returned on every path.
    """
    target = __os.path.join(directory_location, directory_name)
    existed = __os.path.exists(target)
    if directory_create and not existed:
        __os.mkdir(target)
    return existed
def list_to_file(orig_list, file_name, file_location):
    """
    Append each item of a list to a text file, one item per line.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # Context manager guarantees the handle is closed even if a write
    # fails (the original used a bare open/close pair). Mode "a" keeps
    # the original append semantics.
    with open(file, "a") as write_file:
        for orig_list_line in orig_list:
            # %s stringifies the item, matching the old str() + '\n'.
            write_file.write('%s\n' % (orig_list_line,))
    return file_name
def file_to_list(file_name, file_location):
    """
    Read a text file into a list of lines (line endings stripped).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns: returns a list
    """
    file = __os.path.join(file_location, file_name)
    with open(file, "r") as read_file:
        return read_file.read().splitlines()
def csv_to_dict(file_name, file_location):
    """
    Import a csv file as a dictionary of rows keyed 1..N.

    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns: returns a dictionary (one DictReader row per integer key)
    """
    file = __os.path.join(file_location, file_name)
    try:
        csv_read = open(file, "r")
    except Exception as e:
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        # Best-effort fallback: retry ignoring decode errors.
        csv_read = open(file, "r", errors='ignore')
    temp_dict = dict()
    # Context manager closes the handle even if parsing raises (the
    # original leaked it on error).
    with csv_read:
        data_row = __csv.DictReader(csv_read, dialect="excel")
        for dict_key, row in enumerate(data_row, start=1):
            temp_dict[dict_key] = row
    return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Export a dictionary of rows to a csv file.

    Args:
        orig_dict: The dictionary you want exported (values are rows)
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # Mode "a" keeps the original append semantics; note a header row is
    # written on every call, so calling twice duplicates the header.
    with open(file, 'a') as csv_write:
        writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
        # writeheader() replaces the hand-built header dict + writerow.
        writer.writeheader()
        for row in orig_dict.values():
            writer.writerow(row)
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
    """
    Store an object in a shelve file under the given key.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key to store the item to
        file_location: The location of the file, derive from the os module
        object_to_store: The object you want to store
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        db = __shelve.open(db_path)
    except Exception as e:
        # An unreadable store is treated as corrupt: delete and recreate.
        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
        print('Bad storage dB, rebuilding!!')
        __os.remove(db_path)
        db = __shelve.open(db_path)
    db[save_key] = object_to_store
    db.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Retrieve an object stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: Returns the stored object (None when the key is absent)
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        db = __shelve.open(db_path)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    # get() avoids a KeyError for missing keys.
    stored_object = db.get(save_key)
    db.close()
    return stored_object
def delete_object_from_file(file_name, save_key, file_location):
    """
    Delete the object stored under a key in a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module
    """
    db_path = __os.path.join(file_location, file_name)
    db = __shelve.open(db_path)
    del db[save_key]
    db.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Check whether a key exists in a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: returns true or false

    Note: membership is tested with `in` rather than get(); the previous
    get()-based check wrongly reported False for keys whose stored value
    is falsy (0, '', None, empty containers).
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def get_keys_from_shelve(file_name, file_location):
    """
    Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns:
        a list of the keys
    """
    db_path = __os.path.join(file_location, file_name)
    db = __shelve.open(db_path)
    keys = [key for key in db]
    db.close()
    return keys
def remove_spaces(string_item):
    """
    Remove all whitespace from a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string without any spaces
    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace each run of whitespace in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with spaces replaced with a hyphen
    """
    return '-'.join(string_item.split())
def remove_extra_spaces(string_item):
    """
    Collapse runs of whitespace to single spaces and trim the ends.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with single spacing
    """
    return ' '.join(string_item.split())
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Swap every occurrence of one symbol in a string for another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: returns a string with symbols swapped
    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_files_in_directory(full_directory_path):
    """
    List the regular files (not directories) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of files
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the sub-directories (not files) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of directories
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on whitespace while keeping the whitespace runs as
    list items, so join_split_string can rebuild the original exactly.

    :param string: A String
    :return:
        The split string (words and whitespace runs interleaved)
    """
    pattern = r'(\s+)'
    return __re.split(pattern, string)
def join_split_string(split_string):
    """
    Rejoin a string that was split with split_string_retain_spaces.

    :param split_string: A Split String
    :return:
        The reassembled string
    """
    return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list on whitespace, keeping the whitespace runs
    so the lines can be rejoined exactly.

    :param orig_list: Original list
    :return:
        A List with split lines
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A String
    """
    chars = (__random.choice(__string.ascii_letters) for _ in range(chars_per_line))
    return ''.join(chars)
def random_data(line_count=1, chars_per_line=80):
    """
    Build several newline-separated lines of random string data.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A String
    """
    divide_lines = chars_per_line * line_count
    line_total = int(divide_lines / chars_per_line)
    return '\n'.join(random_line_data(chars_per_line) for _ in range(line_total))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Collect files from a list of directories and write them to a zip file.

    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (extension is forced to .zip)
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will
    zip the entire directory.
    """
    temp_list = list()
    # Validate the directory list up front so a bad entry fails fast.
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    if not file_extension_list and not file_name_list:
        # No filters: take every file in every directory.
        for dir_name in dir_list:
            for file_name in list_files_in_directory(dir_name):
                temp_list.append(__os.path.join(dir_name, file_name))
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    # rsplit keeps multi-dot (foo.tar.gz -> gz) and
                    # extension-less names from raising ValueError, which
                    # the old two-way split('.') unpack did.
                    extension = file_name.rsplit('.', 1)[-1]
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # The archive name must be "name.ext"; any extension other than zip is
    # replaced, and anything else is rejected (existing contract).
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # The with-statement closes the archive; the old explicit close()
    # inside the block was redundant.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
file_name_increase
|
python
|
def file_name_increase(file_name, file_location):
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
|
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L90-L112
|
[
"def verify_file_exists(file_name, file_location):\n \"\"\"\n Function to verify if a file exists\n Args:\n file_name: The name of file to check\n file_location: The location of the file, derive from the os module\n\n Returns: returns boolean True or False\n\n \"\"\"\n return __os.path.isfile(__os.path.join(file_location, file_name))\n"
] |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for persistent data storage       #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Check whether a regular file exists at the given location.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns boolean True or False
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Check whether a directory exists, optionally creating it.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If True, create the directory when it is missing

    Returns: boolean - whether the directory existed before the call. The
        original returned None whenever directory_create was True; now a
        boolean is returned on every path.
    """
    target = __os.path.join(directory_location, directory_name)
    existed = __os.path.exists(target)
    if directory_create and not existed:
        __os.mkdir(target)
    return existed
def list_to_file(orig_list, file_name, file_location):
    """
    Function to export a list to a text file, one item per line.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # 'a' preserves the original append semantics: repeated calls accumulate lines.
    # The with-statement guarantees the handle is closed even if a write fails.
    with open(file, 'a') as write_file:
        write_file.writelines('%s\n' % (item,) for item in orig_list)
    return file_name
def file_to_list(file_name, file_location):
    """
    Read a text file and return its lines as a list (line endings stripped).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns: a list of the file's lines
    """
    full_path = __os.path.join(file_location, file_name)
    with open(full_path, "r") as read_file:
        contents = read_file.read()
    return contents.splitlines()
def csv_to_dict(file_name, file_location):
    """
    Import a csv file as a dictionary of row-dicts keyed by 1-based row number.

    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns: a dict mapping row number (starting at 1) to the csv row dict
    """
    full_path = __os.path.join(file_location, file_name)
    try:
        csv_read = open(full_path, "r")
    except Exception as e:
        # Fall back to ignoring decode errors when the first open fails.
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        csv_read = open(full_path, "r", errors='ignore')
    reader = __csv.DictReader(csv_read, dialect="excel")
    temp_dict = {row_number: row for row_number, row in enumerate(reader, start=1)}
    csv_read.close()
    return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Function to export a dictionary of row-dicts to a csv file.

    Args:
        orig_dict: The dictionary you want exported (values are row dicts)
        field_names_tuple: The fieldnames in a tuple
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # 'a' keeps the original append behavior; the header row is written on
    # every call, exactly as before.
    with open(file, 'a') as csv_write:
        writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
        # writeheader() emits the same row as the old manual headers dict.
        writer.writeheader()
        # Iterate values directly instead of items() with an unused variable.
        for row in orig_dict.values():
            writer.writerow(row)
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
    """
    Persist an object into a shelve database under the given key.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key to store the item to
        file_location: The location of the file, derive from the os module
        object_to_store: The object you want to store
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(db_path)
    except Exception as e:
        # Corrupt shelve file: remove it and start a fresh database.
        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
        print('Bad storage dB, rebuilding!!')
        __os.remove(db_path)
        shelve_store = __shelve.open(db_path)
    try:
        shelve_store[save_key] = object_to_store
    finally:
        shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Fetch an object from a shelve database; None when the key is absent.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: the stored object, or None if the key does not exist
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        db = __shelve.open(db_path)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    try:
        # get() avoids KeyError for missing keys, matching the documented contract.
        return db.get(save_key)
    finally:
        db.close()
def delete_object_from_file(file_name, save_key, file_location):
    """
    Remove a key (and its stored object) from a shelve database.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module
    """
    db_path = __os.path.join(file_location, file_name)
    db = __shelve.open(db_path)
    try:
        del db[save_key]
    finally:
        db.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Function to check for a key in a shelve.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: True if the key exists, otherwise False
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    try:
        # Membership test rather than get(): a stored falsy value (0, '', None)
        # previously made this report False even though the key existed.
        return save_key in shelve_store
    finally:
        shelve_store.close()
def get_keys_from_shelve(file_name, file_location):
    """
    Retrieve all keys stored in a shelve database.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns: a list of the keys
    """
    db_path = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(db_path)
    try:
        return list(shelve_store.keys())
    finally:
        shelve_store.close()
def remove_spaces(string_item):
    """
    Strip every whitespace character out of a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: the string with all whitespace removed
    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace each run of whitespace in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: the string with whitespace runs replaced by '-'
    """
    words = string_item.split()
    return '-'.join(words)
def remove_extra_spaces(string_item):
    """
    Collapse consecutive whitespace to single spaces and trim the ends.

    Args:
        string_item: String that you want to remove spaces from

    Returns: the single-spaced string
    """
    words = string_item.split()
    return ' '.join(words)
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Swap every occurrence of one symbol in a string for another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: the string with symbols swapped
    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_files_in_directory(full_directory_path):
    """
    List the regular files (not subdirectories) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: a list of file names
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the subdirectories (not files) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: a list of directory names
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on whitespace, keeping the whitespace runs as list items
    so the original string can be rebuilt with join_split_string.

    :param string: A String
    :return: a list alternating text pieces and whitespace runs
    """
    whitespace_runs = __re.compile(r'(\s+)')
    return whitespace_runs.split(string)
def join_split_string(split_string):
    """
    Reassemble a string previously split with split_string_retain_spaces.

    :param split_string: A Split String
    :return: the joined string
    """
    joined = ''.join(split_string)
    return joined
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list on whitespace, retaining the whitespace runs
    so each line can later be rejoined.

    :param orig_list: Original list of strings
    :return: a list of split-line lists
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns: a random string of the requested length
    """
    letters = __string.ascii_letters
    return ''.join(__random.choice(letters) for _ in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
    """
    Build multiple newline-separated lines of random ASCII letters.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns: a random multi-line string
    """
    total_chars = chars_per_line * line_count
    lines_needed = int(total_chars / chars_per_line)
    return '\n'.join(random_line_data(chars_per_line) for _ in range(lines_needed))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Function to collect files and make a zip file
    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
    """
    # Accumulates the full paths of every file selected for the archive.
    temp_list = list()
    # Validate that dir_list is a list of existing directories before any work.
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    # No filters supplied: take every file in every listed directory.
    if not file_extension_list and not file_name_list:
        for dir_name in dir_list:
            temp_files_list = list_files_in_directory(dir_name)
            for file_name in temp_files_list:
                temp_list.append(__os.path.join(dir_name, file_name))
    # Filter by extension when an extension list was supplied.
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    # NOTE(review): assumes exactly one '.' per file name; a name
                    # like 'a.b.txt' makes this unpack raise ValueError — TODO confirm.
                    garbage, extension = file_name.split('.')
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # Filter by exact file name when a name list was supplied. Both filters may
    # run; a file matching both is appended twice.
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # The output name must be exactly 'name.ext'; any non-zip extension is
    # replaced with 'zip', and zero or multiple dots raise NameError.
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # 'a' appends to an existing archive if one is already present.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
        # NOTE(review): redundant — the with-statement already closes the archive.
        the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
verify_directory
|
python
|
def verify_directory(directory_name, directory_location, directory_create=False):
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
|
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L115-L131
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
    """
    Function to find a non-clashing filename by appending an increasing counter.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: a filename that does not yet exist in file_location
    """
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        try:
            # rsplit keeps multi-dot names intact: 'a.b.csv' -> ('a.b', 'csv').
            # The original split('.') raised on such names and dropped the
            # extension handling entirely.
            name, file_extension = file_name.rsplit('.', 1)
            file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
        except ValueError:
            # No extension present: just append the counter. Narrowed from a
            # blanket except + critical log, since an extensionless name is a
            # normal input, not an error.
            name = file_name
            file_name_temp = '%s-%i' % (name, add_one)
        add_one += 1
    file_name = file_name_temp
    return file_name
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
"""
Function to check for a key in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: returns true or false
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
exists = shelve_store.get(save_key)
shelve_store.close()
if exists:
return True
elif not exists:
return False
def get_keys_from_shelve(file_name, file_location):
"""
Function to retreive all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
"""
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
"""
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
"""
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
def list_directories_in_directory(full_directory_path):
"""
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
"""
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split sting
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
"""
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
"""
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
def random_line_data(chars_per_line=80):
"""
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
"""
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
"""
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
"""
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
list_to_file
|
python
|
def list_to_file(orig_list, file_name, file_location):
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
|
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L134-L162
|
[
"def add_line_break(list_line):\n \"\"\"\n Create a line break at the end of a string\n Args:\n list_line: string\n\n Returns: A string with a line break\n\n \"\"\"\n list_line = ('%s\\n' % (list_line,))\n return list_line\n"
] |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def file_to_list(file_name, file_location):
    """
    Function to import a text file to a list
    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns: returns a list of the file's lines without line endings

    """
    file = __os.path.join(file_location, file_name)
    # The context manager guarantees the handle is closed even if the
    # read raises; the old open/close pair leaked on error.
    with open(file, "r") as read_file:
        return read_file.read().splitlines()
def csv_to_dict(file_name, file_location):
    """
    Function to import a csv as a dictionary
    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns: returns a dictionary keyed by 1-based row number, one csv row per value

    """
    file = __os.path.join(file_location, file_name)
    try:
        csv_read = open(file, "r")
    except Exception as e:  # fall back to ignoring undecodable bytes
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        csv_read = open(file, "r", errors='ignore')
    temp_dict = dict()
    try:
        data_row = __csv.DictReader(csv_read, dialect="excel")
        for dict_key, row in enumerate(data_row, start=1):
            temp_dict[dict_key] = row
    finally:
        # Close the handle even if the CSV parser raises part-way through.
        csv_read.close()
    return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Function to export a dictionary to a csv file
    Args:
        orig_dict: The dictionary you want exported
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info

    """
    file = __os.path.join(file_location, file_name)
    # NOTE(review): the file is opened in append mode, so a header row is
    # written on every call — confirm that is intended for repeat exports.
    with open(file, 'a') as csv_write:
        writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
        writer.writeheader()
        for dict_key in orig_dict:
            writer.writerow(orig_dict[dict_key])
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
    """
    Function to store objects in a shelve
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key to store the item to
        file_location: The location of the file, derive from the os module
        object_to_store: The object you want to store

    Returns:

    """
    file = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(file)
    except Exception as e:  # corrupt dB: delete it and start a fresh one
        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
        print('Bad storage dB, rebuilding!!')
        __os.remove(file)
        shelve_store = __shelve.open(file)
    try:
        shelve_store[save_key] = object_to_store
    finally:
        # Guarantee the shelve is flushed and closed even if the write fails.
        shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Function to retrieve objects from a shelve
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: Returns the stored object, or None when the key is absent

    """
    storage_path = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(storage_path)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    else:
        # .get() avoids a KeyError for missing keys and yields None instead.
        stored_object = shelve_store.get(save_key)
        shelve_store.close()
        return stored_object
def delete_object_from_file(file_name, save_key, file_location):
    """
    Function to delete objects from a shelve
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns:

    """
    storage_path = __os.path.join(file_location, file_name)
    # Shelf is a context manager; leaving the block closes the store.
    with __shelve.open(storage_path) as shelve_store:
        del shelve_store[save_key]
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Function to check for a key in a shelve
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: returns true or false

    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    try:
        # Test membership rather than the truthiness of the stored value, so
        # keys holding falsy objects (0, '', None, ...) are still reported.
        return save_key in shelve_store
    finally:
        shelve_store.close()
def get_keys_from_shelve(file_name, file_location):
    """
    Function to retrieve all keys in a shelve
    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns:
        a list of the keys

    """
    storage_path = __os.path.join(file_location, file_name)
    # Iterating a Shelf yields its keys; the context manager closes it.
    with __shelve.open(storage_path) as shelve_store:
        return list(shelve_store)
def remove_spaces(string_item):
    """
    Remove all whitespace from a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string without any spaces

    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace every run of whitespace in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with spaces replaced with a hyphen

    """
    return '-'.join(string_item.split())
def remove_extra_spaces(string_item):
    """
    Collapse all runs of whitespace in a string to single spaces.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with single spacing

    """
    return ' '.join(string_item.split())
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Remove a symbol from a string, and replace it with a different one
    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: returns a string with symbols swapped

    """
    # str.replace is the built-in equivalent of the old split/join dance.
    return string_item.replace(remove_symbol, add_symbol)
def list_files_in_directory(full_directory_path):
    """
    List the files in a specified directory
    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of files

    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the directories in a specified directory
    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of directories

    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on runs of whitespace, keeping the whitespace runs
    so the pieces can be rejoined losslessly.

    :param string: A String
    :return:
        A split string
    """
    whitespace_runs = __re.compile(r'(\s+)')
    return whitespace_runs.split(string)
def join_split_string(split_string):
    """
    Reassemble a string previously split with split_string_retain_spaces.

    :param split_string: A Split String
    :return:
        A joined string
    """
    return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every string in a list on whitespace runs, keeping the runs
    so each line can be rejoined losslessly.

    :param orig_list: Original list
    :return:
        A List with split lines
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A String
    """
    picks = [__random.choice(__string.ascii_letters) for _ in range(chars_per_line)]
    return ''.join(picks)
def random_data(line_count=1, chars_per_line=80):
    """
    Function to creates lines of random string data
    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A String of newline-separated random lines

    """
    # The old multiply-then-divide of line_count was a no-op; use it directly.
    return '\n'.join(random_line_data(chars_per_line) for _ in range(line_count))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Function to collect files and make a zip file
    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (forced to a .zip extension)
    :param file_extension_list: A list of extensions (without dots) of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.

    """
    temp_list = list()
    # Validate the directory list before touching the filesystem.
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    if not file_extension_list and not file_name_list:
        # No filters: collect every file in every directory.
        for dir_name in dir_list:
            for file_name in list_files_in_directory(dir_name):
                temp_list.append(__os.path.join(dir_name, file_name))
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    # os.path.splitext copes with zero or many dots; the old
                    # str.split('.') raised ValueError for such file names.
                    extension = __os.path.splitext(file_name)[1].lstrip('.')
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # The context manager closes the archive; the old explicit close() inside
    # the with-block was redundant.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
file_to_list
|
python
|
def file_to_list(file_name, file_location):
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
|
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L165-L179
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
    """
    Function to export a list to a text file
    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info

    """
    file = __os.path.join(file_location, file_name)
    # NOTE(review): the file is opened in append mode, so repeat calls keep
    # adding lines — confirm that is the intended contract.
    with open(file, "a") as write_file:
        for orig_list_line in orig_list:
            # %s converts each item to str, matching the old str() + '\n' helper.
            write_file.write('%s\n' % (orig_list_line,))
    return file_name
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
"""
Function to check for a key in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: returns true or false
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
exists = shelve_store.get(save_key)
shelve_store.close()
if exists:
return True
elif not exists:
return False
def get_keys_from_shelve(file_name, file_location):
"""
Function to retreive all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
"""
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
"""
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
"""
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
def list_directories_in_directory(full_directory_path):
"""
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
"""
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split sting
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
"""
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
"""
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
def random_line_data(chars_per_line=80):
"""
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
"""
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
"""
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
"""
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
csv_to_dict
|
python
|
def csv_to_dict(file_name, file_location):
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
|
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L182-L206
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
    """
    Read a text file into a list of lines (line endings stripped).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns: returns a list
    """
    source = __os.path.join(file_location, file_name)
    with open(source, 'r') as input_file:
        return input_file.read().splitlines()
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Append a dictionary of row-dicts to a csv file, writing a header first.

    Args:
        orig_dict: The dictionary you want exported (values are row dicts)
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    target = __os.path.join(file_location, file_name)
    with open(target, 'a') as csv_handle:
        writer = __csv.DictWriter(csv_handle, fieldnames=field_names_tuple, lineterminator='\n')
        # Header row mapping each field name to itself, as before.
        writer.writerow({column: column for column in field_names_tuple})
        for row in orig_dict.values():
            writer.writerow(row)
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
    """
    Store an object in a shelve file, rebuilding the file if unreadable.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key to store the item to
        file_location: The location of the file, derive from the os module
        object_to_store: The object you want to store

    Returns: None
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        storage = __shelve.open(db_path)
    except Exception as err:
        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=err))
        print('Bad storage dB, rebuilding!!')
        # Corrupt database: delete it and start a fresh one.
        __os.remove(db_path)
        storage = __shelve.open(db_path)
    storage[save_key] = object_to_store
    storage.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Retrieve a stored object from a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: the stored object, or None when the key is absent
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        storage = __shelve.open(db_path)
    except Exception as err:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=err))
        __sys.exit('Storage dB is not readable, closing App!!')
    # .get() returns None for a missing key instead of raising.
    stored_object = storage.get(save_key)
    storage.close()
    return stored_object
def delete_object_from_file(file_name, save_key, file_location):
    """
    Delete a stored object from a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: None
    """
    db_path = __os.path.join(file_location, file_name)
    storage = __shelve.open(db_path)
    del storage[save_key]
    storage.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Check whether a key exists in a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: True if the key exists, otherwise False
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # Membership test instead of get(): a key holding a falsy value
    # (0, '', None) previously reported False even though it existed.
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def get_keys_from_shelve(file_name, file_location):
    """
    Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns: a list of the keys
    """
    db_path = __os.path.join(file_location, file_name)
    storage = __shelve.open(db_path)
    all_keys = [key for key in storage]
    storage.close()
    return all_keys
def remove_spaces(string_item):
    """
    Strip every run of whitespace out of a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string without any spaces
    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace every run of whitespace in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with spaces replaced with a hyphen
    """
    return '-'.join(string_item.split())
def remove_extra_spaces(string_item):
    """
    Collapse all runs of whitespace to single spaces and trim the ends.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with single spacing
    """
    return ' '.join(string_item.split())
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Replace every occurrence of one symbol in a string with another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: returns a string with symbols swapped
    """
    # str.replace is equivalent to split-then-join for a non-empty separator.
    return string_item.replace(remove_symbol, add_symbol)
def list_files_in_directory(full_directory_path):
    """
    List the plain files (not sub-directories) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of files
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the sub-directories (not files) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of directories
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on whitespace while keeping the whitespace runs,
    so the pieces can later be rejoined losslessly.

    :param string: A String
    :return:
        A split string (whitespace runs appear as their own list items)
    """
    whitespace_run = __re.compile(r'(\s+)')
    return whitespace_run.split(string)
def join_split_string(split_string):
    """
    Rejoin a string previously split with split_string_retain_spaces.

    :param split_string: A Split String
    :return:
        A joined string
    """
    joined = ''.join(split_string)
    return joined
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list while retaining whitespace for a rejoin.

    :param orig_list: Original list
    :return:
        A List with split lines
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A String
    """
    letters = __string.ascii_letters
    return ''.join(__random.choice(letters) for _ in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
    """
    Build line_count newline-joined lines of random ASCII letters.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A String
    """
    total_chars = chars_per_line * line_count
    number_of_lines = int(total_chars / chars_per_line)
    return '\n'.join(
        ''.join(__random.choice(__string.ascii_letters) for _ in range(chars_per_line))
        for _ in range(number_of_lines))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Collect files from a list of directories and add them to a zip file.

    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (the extension is forced to 'zip')
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will zip the
    entire directory.
    """
    def _files_in(directory):
        # Plain files only (no sub-directories); mirrors
        # list_files_in_directory without the cross-function dependency.
        return [name for name in __os.listdir(directory)
                if __os.path.isfile(__os.path.join(directory, name))]

    temp_list = list()
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    if not file_extension_list and not file_name_list:
        for dir_name in dir_list:
            for file_name in _files_in(dir_name):
                temp_list.append(__os.path.join(dir_name, file_name))
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                for file_name in _files_in(dir_name):
                    # rsplit tolerates names without a dot and names with several
                    # dots; the previous split('.') raised ValueError on both.
                    parts = file_name.rsplit('.', 1)
                    if len(parts) == 2 and parts[1] in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                for file_name in _files_in(dir_name):
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # 'a' mode appends to an existing archive; the with-statement closes the
    # file, so the previous redundant explicit close() is gone.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
dict_to_csv
|
python
|
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
|
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L209-L229
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retrieve key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Check whether a file exists at the given location.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: True if the file exists, otherwise False
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
def file_name_increase(file_name, file_location):
    """
    Return a collision-free variant of a file name.

    Appends -1, -2, ... before the extension until no file with that name
    exists in file_location.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns a good filename.
    """
    add_one = 1
    file_name_temp = file_name
    while __os.path.isfile(__os.path.join(file_location, file_name_temp)):
        # rsplit keeps multi-dot names intact ('a.b.txt' -> 'a.b' + 'txt');
        # the previous str.split('.') raised for such names and its fallback
        # appended the counter after the extension.
        parts = file_name.rsplit('.', 1)
        if len(parts) == 2:
            file_name_temp = '%s-%i.%s' % (parts[0], add_one, parts[1])
        else:
            file_name_temp = '%s-%i' % (file_name, add_one)
        add_one += 1
    return file_name_temp
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Check whether a directory exists, optionally creating it when missing.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If True, create the directory when it does not exist

    Returns: True if the directory already existed, otherwise False.
        (The previous implementation returned None whenever
        directory_create was True, contradicting its own docstring;
        the pre-existing state is now reported in both branches.)
    """
    full_path = __os.path.join(directory_location, directory_name)
    existed = __os.path.exists(full_path)
    if directory_create and not existed:
        __os.mkdir(full_path)
    return existed
def list_to_file(orig_list, file_name, file_location):
    """
    Append every item of a list to a text file, one item per line.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    target = __os.path.join(file_location, file_name)
    # Append mode preserves any existing file content, as before.
    with open(target, 'a') as output_file:
        output_file.writelines('%s\n' % (item,) for item in orig_list)
    return file_name
def file_to_list(file_name, file_location):
    """
    Read a text file into a list of lines (line endings stripped).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns: returns a list
    """
    source = __os.path.join(file_location, file_name)
    with open(source, 'r') as input_file:
        return input_file.read().splitlines()
def csv_to_dict(file_name, file_location):
    """
    Import a csv file as a dictionary keyed by 1-based row number.

    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns: returns a dictionary mapping row number -> row dict
    """
    file = __os.path.join(file_location, file_name)
    try:
        csv_read = open(file, "r")
    except Exception as e:
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        # Fall back to ignoring decode errors, as before.
        csv_read = open(file, "r", errors='ignore')
    reader = __csv.DictReader(csv_read, dialect="excel")
    temp_dict = {row_number: row for row_number, row in enumerate(reader, start=1)}
    csv_read.close()
    return temp_dict
def store_object(file_name, save_key, file_location, object_to_store=None):
    """
    Store an object in a shelve file, rebuilding the file if unreadable.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key to store the item to
        file_location: The location of the file, derive from the os module
        object_to_store: The object you want to store

    Returns: None
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        storage = __shelve.open(db_path)
    except Exception as err:
        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=err))
        print('Bad storage dB, rebuilding!!')
        # Corrupt database: delete it and start a fresh one.
        __os.remove(db_path)
        storage = __shelve.open(db_path)
    storage[save_key] = object_to_store
    storage.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Retrieve a stored object from a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: the stored object, or None when the key is absent
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        storage = __shelve.open(db_path)
    except Exception as err:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=err))
        __sys.exit('Storage dB is not readable, closing App!!')
    # .get() returns None for a missing key instead of raising.
    stored_object = storage.get(save_key)
    storage.close()
    return stored_object
def delete_object_from_file(file_name, save_key, file_location):
    """
    Delete a stored object from a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: None
    """
    db_path = __os.path.join(file_location, file_name)
    storage = __shelve.open(db_path)
    del storage[save_key]
    storage.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Check whether a key exists in a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: True if the key exists, otherwise False
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # Membership test instead of get(): a key holding a falsy value
    # (0, '', None) previously reported False even though it existed.
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def get_keys_from_shelve(file_name, file_location):
    """
    Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns: a list of the keys
    """
    db_path = __os.path.join(file_location, file_name)
    storage = __shelve.open(db_path)
    all_keys = [key for key in storage]
    storage.close()
    return all_keys
def remove_spaces(string_item):
    """
    Strip every run of whitespace out of a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string without any spaces
    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace every run of whitespace in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with spaces replaced with a hyphen
    """
    return '-'.join(string_item.split())
def remove_extra_spaces(string_item):
    """
    Collapse all runs of whitespace to single spaces and trim the ends.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with single spacing
    """
    return ' '.join(string_item.split())
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Replace every occurrence of one symbol in a string with another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: returns a string with symbols swapped
    """
    # str.replace is equivalent to split-then-join for a non-empty separator.
    return string_item.replace(remove_symbol, add_symbol)
def list_files_in_directory(full_directory_path):
    """
    List the plain files (not sub-directories) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of files
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the sub-directories (not files) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of directories
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on whitespace while keeping the whitespace runs,
    so the pieces can later be rejoined losslessly.

    :param string: A String
    :return:
        A split string (whitespace runs appear as their own list items)
    """
    whitespace_run = __re.compile(r'(\s+)')
    return whitespace_run.split(string)
def join_split_string(split_string):
    """
    Rejoin a string previously split with split_string_retain_spaces.

    :param split_string: A Split String
    :return:
        A joined string
    """
    joined = ''.join(split_string)
    return joined
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list while retaining whitespace for a rejoin.

    :param orig_list: Original list
    :return:
        A List with split lines
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A String
    """
    letters = __string.ascii_letters
    return ''.join(__random.choice(letters) for _ in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
    """
    Build line_count newline-joined lines of random ASCII letters.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A String
    """
    total_chars = chars_per_line * line_count
    number_of_lines = int(total_chars / chars_per_line)
    return '\n'.join(
        ''.join(__random.choice(__string.ascii_letters) for _ in range(chars_per_line))
        for _ in range(number_of_lines))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Collect files from a list of directories and add them to a zip file.

    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (the extension is forced to 'zip')
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will zip the
    entire directory.
    """
    def _files_in(directory):
        # Plain files only (no sub-directories); mirrors
        # list_files_in_directory without the cross-function dependency.
        return [name for name in __os.listdir(directory)
                if __os.path.isfile(__os.path.join(directory, name))]

    temp_list = list()
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    if not file_extension_list and not file_name_list:
        for dir_name in dir_list:
            for file_name in _files_in(dir_name):
                temp_list.append(__os.path.join(dir_name, file_name))
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                for file_name in _files_in(dir_name):
                    # rsplit tolerates names without a dot and names with several
                    # dots; the previous split('.') raised ValueError on both.
                    parts = file_name.rsplit('.', 1)
                    if len(parts) == 2 and parts[1] in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                for file_name in _files_in(dir_name):
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # 'a' mode appends to an existing archive; the with-statement closes the
    # file, so the previous redundant explicit close() is gone.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
store_object
|
python
|
def store_object(file_name, save_key, file_location, object_to_store=None):
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
|
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L232-L253
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retrieve key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Check whether a file exists at the given location.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: True if the file exists, otherwise False
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
def file_name_increase(file_name, file_location):
    """
    Return a collision-free variant of a file name.

    Appends -1, -2, ... before the extension until no file with that name
    exists in file_location.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns a good filename.
    """
    add_one = 1
    file_name_temp = file_name
    while __os.path.isfile(__os.path.join(file_location, file_name_temp)):
        # rsplit keeps multi-dot names intact ('a.b.txt' -> 'a.b' + 'txt');
        # the previous str.split('.') raised for such names and its fallback
        # appended the counter after the extension.
        parts = file_name.rsplit('.', 1)
        if len(parts) == 2:
            file_name_temp = '%s-%i.%s' % (parts[0], add_one, parts[1])
        else:
            file_name_temp = '%s-%i' % (file_name, add_one)
        add_one += 1
    return file_name_temp
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Check whether a directory exists, optionally creating it when missing.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If True, create the directory when it does not exist

    Returns: True if the directory already existed, otherwise False.
        (The previous implementation returned None whenever
        directory_create was True, contradicting its own docstring;
        the pre-existing state is now reported in both branches.)
    """
    full_path = __os.path.join(directory_location, directory_name)
    existed = __os.path.exists(full_path)
    if directory_create and not existed:
        __os.mkdir(full_path)
    return existed
def list_to_file(orig_list, file_name, file_location):
    """
    Append every item of a list to a text file, one item per line.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    target = __os.path.join(file_location, file_name)
    # Append mode preserves any existing file content, as before.
    with open(target, 'a') as output_file:
        output_file.writelines('%s\n' % (item,) for item in orig_list)
    return file_name
def file_to_list(file_name, file_location):
    """
    Read a text file into a list of lines (line endings stripped).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns: returns a list
    """
    source = __os.path.join(file_location, file_name)
    with open(source, 'r') as input_file:
        return input_file.read().splitlines()
def csv_to_dict(file_name, file_location):
    """
    Function to import a csv as a dictionary.

    The first csv row is treated as the header; every following row becomes
    a dict stored under a 1-based row counter.

    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns: returns a dictionary of {row_number: row_dict}
    """
    file = __os.path.join(file_location, file_name)
    try:
        csv_read = open(file, "r")
    except Exception as e:
        # NOTE(review): open() itself rarely raises decode errors (they surface
        # on read) — this fallback is kept only for backward compatibility.
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        csv_read = open(file, "r", errors='ignore')
    try:
        data_row = __csv.DictReader(csv_read, dialect="excel")
        # enumerate replaces the hand-maintained counter of the original
        temp_dict = {dict_key: row for dict_key, row in enumerate(data_row, start=1)}
    finally:
        # Close the file even when the csv module raises mid-iteration.
        csv_read.close()
    return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Function to export a dictionary of row-dicts to a csv file.

    Args:
        orig_dict: The dictionary you want exported (values are row dicts)
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # Append mode preserved from the original; context manager closes on error.
    with open(file, 'a') as csv_write:
        writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
        # writeheader() replaces the hand-built {name: name} header row
        writer.writeheader()
        for dict_key in orig_dict:
            writer.writerow(orig_dict[dict_key])
    return file_name
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Function to retrieve objects from a shelve.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: Returns the stored object, or None when the key does not exist
    """
    file = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(file)
    except Exception as e:
        # An unreadable store is fatal by design — the whole app shuts down.
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    try:
        # .get avoids KeyError for missing keys (returns None instead)
        return shelve_store.get(save_key)
    finally:
        # Always close, even if unpickling the value raises.
        shelve_store.close()
def delete_object_from_file(file_name, save_key, file_location):
    """
    Function to delete objects from a shelve.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Raises:
        KeyError: if save_key is not present in the shelve
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    try:
        # KeyError propagates to the caller, as in the original
        del shelve_store[save_key]
    finally:
        # Close the store even when the key is missing (original leaked it).
        shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Function to check for a key in a shelve.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: True if the key exists, False otherwise.

    Note: the original tested the truthiness of ``get(save_key)``, so an
    existing key holding a falsy value (0, '', [], None) reported False.
    Membership testing fixes that.
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    try:
        return save_key in shelve_store
    finally:
        shelve_store.close()
def get_keys_from_shelve(file_name, file_location):
    """
    Function to retreive all keys in a shelve.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns:
        a list of the keys
    """
    storage_path = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(storage_path)
    stored_keys = [stored_key for stored_key in shelve_store]
    shelve_store.close()
    return stored_keys
def remove_spaces(string_item):
    """
    Remove all whitespace from a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string without any spaces
    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace every run of whitespace in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with spaces replaced with a hyphen
    """
    return '-'.join(string_item.split())
def remove_extra_spaces(string_item):
    """
    Collapse all whitespace runs to single spaces and strip the ends.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with single spacing
    """
    return ' '.join(string_item.split())
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Remove a symbol from a string, and replace it with a different one.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: returns a string with symbols swapped
    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_files_in_directory(full_directory_path):
    """
    List the files in a specified directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of files (directories are excluded)
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the directories in a specified directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of directories (regular files are excluded)
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on whitespace while keeping the whitespace runs,
    so the pieces can be rejoined losslessly.

    :param string: A String
    :return:
        A list of tokens interleaved with the original whitespace runs
    """
    whitespace_pattern = __re.compile(r'(\s+)')
    return whitespace_pattern.split(string)
def join_split_string(split_string):
    """
    Join a string previously split with split_string_retain_spaces.

    :param split_string: A Split String
    :return:
        A joined string
    """
    joined = ''.join(split_string)
    return joined
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list on whitespace, keeping the whitespace runs
    so each line can be rejoined losslessly.

    :param orig_list: Original list
    :return:
        A List with split lines
    """
    return [__re.split(r'(\s+)', current_line) for current_line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A String of random letters, chars_per_line long
    """
    letters = __string.ascii_letters
    return ''.join(__random.choice(letters) for _ in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
    """
    Build line_count newline-separated lines of random letters.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A String
    """
    divide_lines = chars_per_line * line_count
    number_of_lines = int(divide_lines / chars_per_line)
    random_lines = [random_line_data(chars_per_line) for _ in range(number_of_lines)]
    return '\n'.join(random_lines)
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Function to collect files and make a zip file

    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (one dot, .zip extension enforced)
    :param file_extension_list: A list of extensions of files to find (without dots)
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
    """
    temp_list = list()
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    if not file_extension_list and not file_name_list:
        # No filters at all: collect every file in every directory.
        for dir_name in dir_list:
            for file_name in list_files_in_directory(dir_name):
                temp_list.append(__os.path.join(dir_name, file_name))
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    # splitext is safe for names with zero or multiple dots,
                    # unlike the old two-way "name.ext" split which raised
                    # ValueError on them; lstrip removes splitext's leading dot.
                    extension = __os.path.splitext(file_name)[1].lstrip('.')
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # Append mode preserved from the original; the context manager closes the
    # archive, so the original's extra close() call inside the with was removed.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
retrieve_object_from_file
|
python
|
def retrieve_object_from_file(file_name, save_key, file_location):
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
|
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L256-L276
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
"""
Function to check for a key in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: returns true or false
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
exists = shelve_store.get(save_key)
shelve_store.close()
if exists:
return True
elif not exists:
return False
def get_keys_from_shelve(file_name, file_location):
"""
Function to retreive all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
"""
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
"""
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
"""
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
def list_directories_in_directory(full_directory_path):
"""
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
"""
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split sting
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
"""
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
"""
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
def random_line_data(chars_per_line=80):
"""
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
"""
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
"""
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
"""
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
delete_object_from_file
|
python
|
def delete_object_from_file(file_name, save_key, file_location):
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
|
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L279-L293
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Function to verify if a file exists.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns boolean True or False
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
def file_name_increase(file_name, file_location):
    """
    Function to increase a filename by a number 1 until it no longer
    collides with an existing file (foo.txt -> foo-1.txt -> foo-2.txt ...).

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns a good filename.
    """
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        # splitext handles names with no dot or with several dots, unlike the
        # original two-way split('.') which raised ValueError for them and was
        # then logged as CRITICAL on what is a perfectly normal input.
        name, file_extension = __os.path.splitext(file_name)
        if file_extension:
            # file_extension includes the leading dot, e.g. '.txt'
            file_name_temp = '%s-%i%s' % (name, add_one, file_extension)
        else:
            file_name_temp = '%s-%i' % (name, add_one)
        add_one += 1
    file_name = file_name_temp
    return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Function to export a dictionary of rows to a csv file (appended).

    Args:
        orig_dict: The dictionary you want exported (values are row dicts)
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # with-statement guarantees the handle is closed on error (original leaked it);
    # writeheader() replaces the hand-built header-dict row with the same output.
    with open(file, 'a') as csv_write:
        writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
        writer.writeheader()
        # iterate keys directly — the original unpacked .items() but never used the value
        for dict_key in orig_dict:
            writer.writerow(orig_dict[dict_key])
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
    """
    Persist an object into a shelve database under *save_key*.

    If the existing shelve file cannot be opened, it is deleted and a
    fresh one is created (the stored data in the bad file is lost).

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key to store the item to
        file_location: The location of the file, derive from the os module
        object_to_store: The object you want to store

    Returns:
    """
    storage_path = __os.path.join(file_location, file_name)
    try:
        db = __shelve.open(storage_path)
    except Exception as e:
        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
        print('Bad storage dB, rebuilding!!')
        # unreadable database: wipe it and start over
        __os.remove(storage_path)
        db = __shelve.open(storage_path)
    db[save_key] = object_to_store
    db.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Fetch the object stored under *save_key* from a shelve database.

    Exits the interpreter if the shelve file cannot be opened.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: Returns the stored object, or None when the key is absent
    """
    db = None
    storage_path = __os.path.join(file_location, file_name)
    try:
        db = __shelve.open(storage_path)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    # .get() so a missing key yields None instead of raising
    result = db.get(save_key)
    db.close()
    return result
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Function to check for a key in a shelve.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: returns True or False
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # BUG FIX: the original tested the truthiness of shelve_store.get(save_key),
    # so a key holding a falsy value (0, '', [], None, ...) reported as missing.
    # Membership testing answers the actual question: does the key exist?
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def get_keys_from_shelve(file_name, file_location):
    """
    Function to retrieve all keys in a shelve.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns:
        a list of the keys
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # list() replaces the manual append loop; try/finally guarantees the
    # shelve is closed even if iteration raises (the original leaked it).
    try:
        return list(shelve_store)
    finally:
        shelve_store.close()
def remove_spaces(string_item):
    """
    Delete every whitespace run from a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string without any spaces
    """
    pieces = string_item.split()
    return ''.join(pieces)
def remove_spaces_add_hyphen(string_item):
    """
    Collapse each whitespace run in a string into a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with spaces replaced with a hyphen
    """
    pieces = string_item.split()
    return '-'.join(pieces)
def remove_extra_spaces(string_item):
    """
    Collapse each whitespace run in a string into a single space,
    trimming leading/trailing whitespace.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with single spacing
    """
    pieces = string_item.split()
    return ' '.join(pieces)
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Replace every occurrence of one symbol in a string with another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: returns a string with symbols swapped
    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_files_in_directory(full_directory_path):
    """
    List the regular files (not subdirectories) in a specified directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of file names
    """
    # comprehension replaces the manual append loop
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the subdirectories (not files) in a specified directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of directory names
    """
    # comprehension replaces the manual append loop
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on whitespace while keeping the whitespace runs as
    list items, so the original string can be rebuilt with
    ``join_split_string``.

    :param string: A String
    :return:
        A split string (list of tokens and whitespace runs)
    """
    whitespace_run = __re.compile(r'(\s+)')
    return whitespace_run.split(string)
def join_split_string(split_string):
    """
    Rebuild a string from the tokens produced by
    ``split_string_retain_spaces``.

    :param split_string: A Split String
    :return:
        A joined string
    """
    joined = ''.join(split_string)
    return joined
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list on whitespace, keeping the whitespace runs
    so each line can be rebuilt with ``join_split_string``.

    :param orig_list: Original list
    :return:
        A List with split lines
    """
    # comprehension replaces the manual append loop
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A String
    """
    letters = __string.ascii_letters
    picks = (__random.choice(letters) for _ in range(chars_per_line))
    return ''.join(picks)
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Function to collect files and make a zip file.

    :param dir_list: A list of directories to scan (non-recursive)
    :param output_dir: The output directory for the zip file
    :param zip_file_name: Zip file name (must contain exactly one '.');
        a non-'zip' extension is silently changed to '.zip'
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file (the function itself returns None)

    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
    Note: filters are additive — supplying both lists can add the same file twice.
    """
    temp_list = list()
    # Validate every entry of dir_list before collecting anything.
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    # No filters: collect every file from every directory.
    if not file_extension_list and not file_name_list:
        for dir_name in dir_list:
            temp_files_list = list_files_in_directory(dir_name)
            for file_name in temp_files_list:
                temp_list.append(__os.path.join(dir_name, file_name))
    # Extension filter: only files whose extension is listed.
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    # NOTE(review): split('.') assumes exactly one dot in the
                    # file name — 'a.tar.gz' or 'README' raises ValueError here.
                    garbage, extension = file_name.split('.')
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # Name filter: only files whose exact name is listed.
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # Normalize the zip file name: force a .zip extension, reject names
    # that do not contain exactly one dot.
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # 'a' mode appends to an existing archive; entries are stored under the
    # same (possibly relative) paths they were collected with.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
        # NOTE(review): close() inside the with-block is redundant — the
        # context manager closes the archive on exit.
        the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
verify_key_in_shelve
|
python
|
def verify_key_in_shelve(file_name, save_key, file_location):
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
exists = shelve_store.get(save_key)
shelve_store.close()
if exists:
return True
elif not exists:
return False
|
Function to check for a key in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: returns true or false
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L296-L315
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Check whether *file_name* exists inside *file_location* as a regular file.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns boolean True or False
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
def file_name_increase(file_name, file_location):
    """
    Find a non-colliding file name by appending an increasing -N suffix.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns a good filename.
    """
    add_one = 1
    file_name_temp = file_name
    # Keep bumping the counter until the candidate name is free.
    while verify_file_exists(file_name_temp, file_location):
        try:
            # Assumes exactly one dot: 'report.txt' -> 'report-1.txt'.
            # NOTE(review): 'a.tar.gz' raises ValueError here and falls into
            # the except branch, losing the extension — confirm that is intended.
            name, file_extension = file_name.split('.')
            file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
        except Exception as e:
            LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
            # No (single) extension: just append the counter to the whole name.
            name = file_name
            file_name_temp = '%s-%i' % (name, add_one)
        add_one += 1
    file_name = file_name_temp
    return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Function to verify if a directory exists.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If you want to create the directory when it is missing

    Returns: returns boolean True or False when directory_create is False.
        NOTE(review): when directory_create is True this function implicitly
        returns None — callers must not rely on the return value in that mode.
    """
    if not directory_create:
        return __os.path.exists(__os.path.join(directory_location, directory_name))
    elif directory_create:
        good = __os.path.exists(__os.path.join(directory_location, directory_name))
        if not good:
            # mkdir (not makedirs): raises if directory_location itself is missing
            __os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
    """
    Delete the object stored under *save_key* from a shelve database.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns:
    """
    storage_path = __os.path.join(file_location, file_name)
    db = __shelve.open(storage_path)
    # del raises KeyError if the key is absent, same as a plain dict
    del db[save_key]
    db.close()
def get_keys_from_shelve(file_name, file_location):
"""
Function to retreive all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
"""
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
"""
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
"""
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
def list_directories_in_directory(full_directory_path):
"""
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
"""
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split sting
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
"""
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
"""
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
def random_line_data(chars_per_line=80):
"""
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
"""
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
"""
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
"""
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
get_keys_from_shelve
|
python
|
def get_keys_from_shelve(file_name, file_location):
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
|
Function to retreive all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L318-L335
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Function to check for a key in a shelve
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module
    Returns: returns true or false
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # Fix: test key membership rather than the truthiness of the stored
    # value; previously a stored falsy object (0, '', None, []) made an
    # existing key report as absent.
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Swap every occurrence of one symbol in a string for another.
    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add
    Returns: returns a string with symbols swapped
    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
def list_directories_in_directory(full_directory_path):
"""
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
"""
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split sting
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
"""
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
"""
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
def random_line_data(chars_per_line=80):
"""
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
"""
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
"""
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
"""
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
remove_symbol_add_symbol
|
python
|
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
|
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L377-L389
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Function to verify if a file exists
    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module
    Returns: returns boolean True or False
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
def file_name_increase(file_name, file_location):
    """
    Function to increase a filename by a number 1
    Produces "name-1.ext", "name-2.ext", ... until a name is found that does
    not already exist in file_location.
    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module
    Returns: returns a good filename.
    """
    # os.path.splitext correctly handles names with no extension and names
    # containing multiple dots; the previous str.split('.') based logic
    # mis-handled both (it appended the counter after the extension, or
    # relied on a broad except for control flow).
    name, file_extension = __os.path.splitext(file_name)
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        file_name_temp = '%s-%i%s' % (name, add_one, file_extension)
        add_one += 1
    return file_name_temp
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Function to verify if a directory exists
    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If you want to create the directory when it is missing
    Returns: returns boolean True or False reflecting whether the directory
        existed at call time; when directory_create is True a missing
        directory is created before returning.
    """
    full_path = __os.path.join(directory_location, directory_name)
    exists = __os.path.exists(full_path)
    # Fix: the original implicitly returned None on the directory_create
    # branch; the existence result is now returned on every path.
    if directory_create and not exists:
        __os.mkdir(full_path)
    return exists
def list_to_file(orig_list, file_name, file_location):
    """
    Function to export a list to a text file
    Each item is written on its own line; the file is opened in append mode,
    so repeated calls accumulate lines.
    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module
    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # Context manager guarantees the handle is closed even if a write fails;
    # the original left the file open on error.
    with open(file, "a") as write_file:
        for orig_list_line in orig_list:
            # %s performs the str() conversion the original did explicitly.
            write_file.write('%s\n' % (orig_list_line,))
    return file_name
def file_to_list(file_name, file_location):
    """
    Function to import a text file to a list
    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module
    Returns: returns a list of the file's lines without line terminators
    """
    file = __os.path.join(file_location, file_name)
    # Context manager guarantees the handle is closed even if the read fails.
    with open(file, "r") as read_file:
        return read_file.read().splitlines()
def csv_to_dict(file_name, file_location):
    """
    Function to import a csv as a dictionary
    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module
    Returns: returns a dictionary keyed 1..N, one entry per data row
    """
    file = __os.path.join(file_location, file_name)
    try:
        # Fix: decode errors are raised while *reading*, not while opening,
        # so the whole parse must live inside the try block for the
        # errors='ignore' fallback to ever trigger.
        with open(file, "r") as csv_read:
            rows = list(__csv.DictReader(csv_read, dialect="excel"))
    except Exception as e:
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        with open(file, "r", errors='ignore') as csv_read:
            rows = list(__csv.DictReader(csv_read, dialect="excel"))
    return {dict_key: row for dict_key, row in enumerate(rows, start=1)}
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Function to export a dictionary to a csv file
    Args:
        orig_dict: The dictionary you want exported (values are row dicts)
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple
        file_location: The location of the file, derive from the os module
    Returns: returns the filename info
    """
    file = __os.path.join(file_location, file_name)
    # NOTE(review): append mode means a header row is written on every call;
    # this matches the original behavior.
    with open(file, 'a') as csv_write:
        writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
        # writeheader() replaces the hand-built {name: name} header row.
        writer.writeheader()
        for row in orig_dict.values():
            writer.writerow(row)
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
    """
    Persist an object in a shelve database under the given key.
    If the database cannot be opened it is deleted and rebuilt from scratch.
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key to store the item to
        file_location: The location of the file, derive from the os module
        object_to_store: The object you want to store
    Returns:
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        db = __shelve.open(db_path)
    except Exception as e:
        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
        print('Bad storage dB, rebuilding!!')
        __os.remove(db_path)
        db = __shelve.open(db_path)
    db[save_key] = object_to_store
    db.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """
    Fetch a stored object from a shelve database.
    Exits the application if the database cannot be opened.
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module
    Returns: Returns the stored object, or None when the key is absent
    """
    db_path = __os.path.join(file_location, file_name)
    try:
        db = __shelve.open(db_path)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    found = db.get(save_key)
    db.close()
    return found
def delete_object_from_file(file_name, save_key, file_location):
    """
    Remove a stored key (and its object) from a shelve database.
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module
    Returns:
    """
    db = __shelve.open(__os.path.join(file_location, file_name))
    del db[save_key]
    db.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Function to check for a key in a shelve
    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module
    Returns: returns true or false
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # Fix: test key membership rather than the truthiness of the stored
    # value; previously a stored falsy object (0, '', None, []) made an
    # existing key report as absent.
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def get_keys_from_shelve(file_name, file_location):
    """
    Return every key stored in a shelve database.
    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module
    Returns:
        a list of the keys
    """
    db = __shelve.open(__os.path.join(file_location, file_name))
    # Iterating a Shelf yields its keys, same as the original explicit loop.
    keys = list(db)
    db.close()
    return keys
def remove_spaces(string_item):
    """
    Strip every whitespace character from a string.
    Args:
        string_item: String that you want to remove spaces from
    Returns: returns a string without any spaces
    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace every run of whitespace in a string with a single hyphen.
    Args:
        string_item: String that you want to remove spaces from
    Returns: returns a string with spaces replaced with a hyphen
    """
    return '-'.join(string_item.split())
def remove_extra_spaces(string_item):
    """
    Collapse every run of whitespace in a string to a single space and trim
    leading/trailing whitespace.
    Args:
        string_item: String that you want to remove spaces from
    Returns: returns a string with single spacing
    """
    return ' '.join(string_item.split())
def list_files_in_directory(full_directory_path):
    """
    List the files in a specified directory
    Args:
        full_directory_path: The full directory path to check, derive from the os module
    Returns: returns a list of files
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the directories in a specified directory
    Args:
        full_directory_path: The full directory path to check, derive from the os module
    Returns: returns a list of directories
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on runs of whitespace while keeping each run as its own
    list element, so the pieces can later be rejoined losslessly.
    :param string: A String
    :return:
        A split sting
    """
    whitespace_runs = r'(\s+)'
    return __re.split(whitespace_runs, string)
def join_split_string(split_string):
    """
    Reassemble the pieces produced by split_string_retain_spaces into the
    original string.
    :param split_string: A Split String
    :return:
        A joined string
    """
    return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list on whitespace runs, keeping the runs as list
    elements so each line can be rejoined later.
    :param orig_list: Original list
    :return:
        A List with split lines
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.
    Args:
        chars_per_line: An integer that says how many characters to return
    Returns:
        A String
    """
    letters = __string.ascii_letters
    return ''.join(__random.choice(letters) for _ in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
    """
    Function to creates lines of random string data
    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return
    Returns:
        A String
    """
    # Fix: the original computed int((chars_per_line * line_count) / chars_per_line)
    # with float division, which both pointlessly re-derives line_count and
    # can misround for very large products; the line count is simply line_count.
    return '\n'.join(random_line_data(chars_per_line) for _ in range(line_count))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Function to collect files and make a zip file
    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (extension is forced to .zip)
    :param file_extension_list: A list of extensions (without dots) of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file
    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
    """
    temp_list = list()
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    if not file_extension_list and not file_name_list:
        # No filters: take every file in every directory.
        for dir_name in dir_list:
            for file_name in list_files_in_directory(dir_name):
                temp_list.append(__os.path.join(dir_name, file_name))
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    # Fix: os.path.splitext copes with file names that have no
                    # extension or contain extra dots; the previous
                    # "garbage, extension = file_name.split('.')" raised
                    # ValueError for both of those cases.
                    extension = __os.path.splitext(file_name)[1].lstrip('.')
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                for file_name in list_files_in_directory(dir_name):
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # The with statement closes the archive; the original's extra close()
    # inside the block was redundant.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
list_files_in_directory
|
python
|
def list_files_in_directory(full_directory_path):
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
|
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L392-L405
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
    """Retrieve a stored object from a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns:
        The stored object, or None when *save_key* is not present
        (``.get`` is used, so a missing key does not raise).
    """
    shelve_store = None
    file = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(file)
    except Exception as e:
        # An unreadable store is treated as fatal: log and terminate the
        # whole process rather than returning a bogus value.
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    stored_object = shelve_store.get(save_key)
    shelve_store.close()
    return stored_object
def delete_object_from_file(file_name, save_key, file_location):
    """Delete the entry stored under *save_key* from a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns:
        None

    Raises:
        KeyError: if *save_key* is not present in the shelve
        (``del`` is used directly, with no existence check).
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    del shelve_store[save_key]
    shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """Check whether *save_key* exists in a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns:
        bool: True when the key is present, False otherwise.
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # Membership test instead of get()-truthiness: a key stored with a
    # falsy value (0, '', None, ...) previously reported False even
    # though the key exists.
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def get_keys_from_shelve(file_name, file_location):
    """Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns:
        A list of the keys.
    """
    target = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(target)
    key_list = [key for key in shelve_store]
    shelve_store.close()
    return key_list
def remove_spaces(string_item):
    """Strip every run of whitespace out of *string_item*.

    Args:
        string_item: String that you want to remove spaces from

    Returns:
        The input string with all whitespace removed.
    """
    fragments = string_item.split()
    return ''.join(fragments)
def remove_spaces_add_hyphen(string_item):
    """Replace each whitespace run in *string_item* with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns:
        The input string with whitespace runs replaced by hyphens.
    """
    fragments = string_item.split()
    return '-'.join(fragments)
def remove_extra_spaces(string_item):
    """Collapse every whitespace run in *string_item* to a single space.

    Args:
        string_item: String that you want to remove spaces from

    Returns:
        A single-spaced version of the input string.
    """
    fragments = string_item.split()
    return ' '.join(fragments)
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """Swap every occurrence of one symbol in a string for another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns:
        The input string with each *remove_symbol* replaced by *add_symbol*.
    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_directories_in_directory(full_directory_path):
    """List the sub-directories found directly inside a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns:
        A list of directory names (not full paths).
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """Split *string* on whitespace while keeping the whitespace runs.

    The capturing group in the pattern makes ``re.split`` emit the
    separators too, so joining the result reproduces the original string.

    :param string: A String
    :return:
        A list of tokens interleaved with the whitespace between them.
    """
    separator_pattern = r'(\s+)'
    return __re.split(separator_pattern, string)
def join_split_string(split_string):
    """Reassemble a list produced by :func:`split_string_retain_spaces`.

    :param split_string: A Split String (list of tokens and separators)
    :return:
        The joined string.
    """
    glue = ''
    return glue.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
    """Split every line in a list on whitespace, keeping the separators.

    Each element is split with a capturing pattern so the whitespace runs
    survive and the line can later be rejoined losslessly.

    :param orig_list: Original list of strings
    :return:
        A list of split lines (one list of tokens per input line).
    """
    return [__re.split(r'(\s+)', entry) for entry in orig_list]
def random_line_data(chars_per_line=80):
    """Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A string of *chars_per_line* random letters.
    """
    alphabet = __string.ascii_letters
    picks = (__random.choice(alphabet) for _ in range(chars_per_line))
    return ''.join(picks)
def random_data(line_count=1, chars_per_line=80):
    """Build multiple newline-separated lines of random letter data.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A single string with *line_count* lines joined by newlines.
    """
    total_chars = chars_per_line * line_count
    lines_needed = int(total_chars / chars_per_line)
    lines = (random_line_data(chars_per_line) for _ in range(lines_needed))
    return '\n'.join(lines)
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """Collect files from a set of directories and add them to a zip archive.

    :param dir_list: A list of directories to collect files from
    :param output_dir: The output directory for the zip file
    :param zip_file_name: Zip file name (must contain exactly one '.');
        a non-'zip' extension is rewritten to '.zip' with a warning
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file (opened in append mode, so repeated calls with
        the same name add to the existing archive)
    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
    """
    temp_list = list()
    # --- validate dir_list: must be a list whose members are all real dirs ---
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    # --- no filters: take every file (non-recursive) from every directory ---
    if not file_extension_list and not file_name_list:
        for dir_name in dir_list:
            temp_files_list = list_files_in_directory(dir_name)
            for file_name in temp_files_list:
                temp_list.append(__os.path.join(dir_name, file_name))
    # --- filter by extension (may combine with the name filter below) ---
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    # TODO(review): split('.') unpacking raises ValueError for
                    # file names with zero or more than one dot — confirm inputs.
                    garbage, extension = file_name.split('.')
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # --- filter by exact file name ---
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # --- normalize the archive name: exactly one dot, extension forced to zip ---
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # --- write the archive; 'a' appends to an existing zip of the same name ---
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
        # NOTE(review): redundant — the with block already closes the archive.
        the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
list_directories_in_directory
|
python
|
def list_directories_in_directory(full_directory_path):
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
|
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L408-L421
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for persistent data storage #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """Check whether a regular file exists at *file_location*/*file_name*.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns:
        bool: True when the path names an existing regular file.
    """
    candidate = __os.path.join(file_location, file_name)
    return __os.path.isfile(candidate)
def file_name_increase(file_name, file_location):
    """Find a non-colliding file name by appending an increasing counter.

    While ``<name>-<i>.<ext>`` (or ``<name>-<i>`` for extensionless names)
    already exists in *file_location*, the counter is incremented.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns:
        A file name that does not yet exist in *file_location*
        (the original name when there is no collision).
    """
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        try:
            # Single-dot names: insert the counter before the extension.
            # NOTE(review): names with zero or 2+ dots raise here and fall
            # into the except branch, which treats them as extensionless.
            name, file_extension = file_name.split('.')
            file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
        except Exception as e:
            LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
            name = file_name
            file_name_temp = '%s-%i' % (name, add_one)
        add_one += 1
    file_name = file_name_temp
    return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
    """Check whether a directory exists, optionally creating it.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If you want to create the directory

    Returns:
        When directory_create is False: bool existence of the directory.
        When directory_create is True: None (the directory is created if
        missing; no value is returned — matches the original contract).
    """
    target = __os.path.join(directory_location, directory_name)
    if directory_create:
        if not __os.path.exists(target):
            __os.mkdir(target)
        return None
    return __os.path.exists(target)
def list_to_file(orig_list, file_name, file_location):
    """Append each element of *orig_list* to a text file, one per line.

    Elements are converted with ``str`` before writing, and the file is
    opened in append mode, so repeated calls accumulate lines.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns:
        returns the filename info
    """
    target = __os.path.join(file_location, file_name)
    with open(target, 'a') as out_handle:
        for entry in orig_list:
            out_handle.write('%s\n' % (str(entry),))
    return file_name
def file_to_list(file_name, file_location):
    """Read a text file and return its lines as a list (newlines stripped).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns:
        A list with one entry per line of the file.
    """
    source = __os.path.join(file_location, file_name)
    with open(source, 'r') as in_handle:
        contents = in_handle.read()
    return contents.splitlines()
def csv_to_dict(file_name, file_location):
    """Import a csv file as a dictionary of row-dicts.

    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns:
        A dict mapping sequential integer keys (starting at 1, in file
        order) to the row dicts produced by ``csv.DictReader``.
    """
    file = __os.path.join(file_location, file_name)
    try:
        csv_read = open(file, "r")
    except Exception as e:
        # Fallback: retry with undecodable bytes silently dropped.
        # NOTE(review): decode errors usually surface on read(), not on
        # open(), so this retry path may rarely trigger — confirm intent.
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        csv_read = open(file, "r", errors='ignore')
    data_row = __csv.DictReader(csv_read, dialect="excel")
    dict_key = 1
    temp_dict = dict()
    for row in data_row:
        temp_dict[dict_key] = row
        dict_key += 1
    csv_read.close()
    return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
"""
Function to check for a key in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: returns true or false
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
exists = shelve_store.get(save_key)
shelve_store.close()
if exists:
return True
elif not exists:
return False
def get_keys_from_shelve(file_name, file_location):
"""
Function to retreive all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
"""
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
"""
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
"""
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
def list_files_in_directory(full_directory_path):
    """List the plain files (not sub-directories) inside a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns:
        A list of file names (not full paths).
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split sting
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
"""
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
"""
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
def random_line_data(chars_per_line=80):
"""
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
"""
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
"""
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
"""
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
split_strings_in_list_retain_spaces
|
python
|
def split_strings_in_list_retain_spaces(orig_list):
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
|
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L446-L459
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for persistent data storage #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
    """
    Delete a stored object from a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns:
        None

    Raises:
        KeyError: if save_key is not present in the shelve
    """
    file = __os.path.join(file_location, file_name)
    # the context manager guarantees the shelve is closed even when the
    # key is missing and `del` raises (the old open/close pair leaked it)
    with __shelve.open(file) as shelve_store:
        del shelve_store[save_key]
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Check whether a key exists in a shelve file.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns:
        bool: True if the key exists, False otherwise
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # test key membership directly: the previous truthiness test on the
    # stored VALUE wrongly reported False for keys holding falsy values
    # such as 0, '' or None
    exists = save_key in shelve_store
    shelve_store.close()
    return exists
def get_keys_from_shelve(file_name, file_location):
    """
    Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns:
        A list of the keys
    """
    db_path = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(db_path)
    key_list = list(shelve_store.keys())
    shelve_store.close()
    return key_list
def remove_spaces(string_item):
    """
    Remove all whitespace from a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns:
        The string with every whitespace run deleted
    """
    # split() breaks on any whitespace; joining with '' drops it all
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace every whitespace run in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns:
        The string with whitespace runs replaced by '-'
    """
    words = string_item.split()
    return '-'.join(words)
def remove_extra_spaces(string_item):
    """
    Collapse all whitespace runs in a string to single spaces.

    Args:
        string_item: String that you want to remove spaces from

    Returns:
        The string with single spacing and no leading/trailing whitespace
    """
    words = string_item.split()
    return ' '.join(words)
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Replace every occurrence of one symbol in a string with another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns:
        The string with every remove_symbol swapped for add_symbol
    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_files_in_directory(full_directory_path):
    """
    List the regular files directly inside a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns:
        A list of file names (directories and other entries are excluded)
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the sub-directories directly inside a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns:
        A list of directory names (files and other entries are excluded)
    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string on whitespace, keeping the whitespace runs as list items
    so the original string can be rebuilt exactly with join_split_string.

    :param string: A String
    :return:
        A list alternating between non-space chunks and whitespace runs
    """
    # the capturing group makes re.split keep the whitespace separators
    separator = __re.compile(r'(\s+)')
    return separator.split(string)
def join_split_string(split_string):
    """
    Reassemble a string that was split with split_string_retain_spaces.

    :param split_string: A Split String
    :return:
        The rejoined string
    """
    rebuilt = ''.join(split_string)
    return rebuilt
def random_line_data(chars_per_line=80):
    """
    Create one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A string of chars_per_line random letters
    """
    letters = __string.ascii_letters
    return ''.join(__random.choice(letters) for _ in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
    """
    Create lines of random string data.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A string of line_count lines joined by newlines
    """
    # the original computed int((chars_per_line * line_count) / chars_per_line),
    # which is just line_count — and raised ZeroDivisionError for
    # chars_per_line == 0; iterate line_count directly instead
    return '\n'.join(random_line_data(chars_per_line) for _ in range(line_count))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Function to collect files and make a zip file
    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (must contain exactly one '.'; a
        non-'zip' extension is rewritten to '.zip')
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
    """
    temp_list = list()
    # validate that dir_list is a list of existing directories before doing any work
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    # no filters at all: collect every file in every listed directory
    if not file_extension_list and not file_name_list:
        for dir_name in dir_list:
            temp_files_list = list_files_in_directory(dir_name)
            for file_name in temp_files_list:
                temp_list.append(__os.path.join(dir_name, file_name))
    # extension filter: keep only files whose extension is in the list
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    # NOTE(review): assumes every file name contains exactly one '.';
                    # a name with zero or multiple dots raises ValueError here — TODO confirm intent
                    garbage, extension = file_name.split('.')
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # exact-name filter: keep only files whose name is in the list
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    # enforce a single-dot zip file name and force the extension to 'zip'
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        # NOTE(review): NameError is an unusual choice for a bad argument value;
        # preserved as-is because callers may catch it
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # append mode ('a'): an existing archive of the same name is extended, not replaced
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
        # NOTE(review): close() is redundant inside the with block (the context
        # manager closes the archive), but harmless
        the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
random_line_data
|
python
|
def random_line_data(chars_per_line=80):
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
|
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L462-L472
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Check whether a regular file exists at the given location.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns:
        bool: True if the path exists and is a regular file
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
def file_name_increase(file_name, file_location):
    """
    Return a file name that does not collide with an existing file.

    If file_name already exists in file_location, -1, -2, ... is inserted
    before the extension until a free name is found.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns:
        A non-colliding file name
    """
    # splitext handles names with zero or many dots correctly; the original
    # single split('.') raised for such names and fell into an exception-driven
    # branch that dropped the extension placement
    name, extension = __os.path.splitext(file_name)
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        file_name_temp = '{}-{}{}'.format(name, add_one, extension)
        add_one += 1
    return file_name_temp
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Check whether a directory exists, optionally creating it when missing.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If True, create the directory when it does not exist

    Returns:
        bool: whether the directory existed before any creation
    """
    full_path = __os.path.join(directory_location, directory_name)
    existed = __os.path.exists(full_path)
    if directory_create and not existed:
        __os.mkdir(full_path)
    # the original returned None in create mode; returning the pre-existing
    # state keeps the documented boolean contract in both modes
    return existed
def list_to_file(orig_list, file_name, file_location):
    """
    Append each element of a list to a text file, one element per line.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns:
        The file name written to
    """
    file = __os.path.join(file_location, file_name)
    # 'a' preserves the original append semantics; the with statement
    # guarantees the handle is closed even if a write raises
    with open(file, 'a') as write_file:
        for orig_list_line in orig_list:
            write_file.write(str(orig_list_line) + '\n')
    return file_name
def file_to_list(file_name, file_location):
    """
    Read a text file into a list of its lines (line endings stripped).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns:
        A list of the file's lines
    """
    file = __os.path.join(file_location, file_name)
    # with guarantees the handle is closed even if reading raises
    with open(file, 'r') as read_file:
        return read_file.read().splitlines()
def csv_to_dict(file_name, file_location):
    """
    Import a csv file as a dictionary of rows.

    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns:
        dict mapping 1-based row number -> row dict (csv header -> value)
    """
    file = __os.path.join(file_location, file_name)
    try:
        csv_read = open(file, "r")
    except Exception as e:
        # original best-effort fallback: retry with lossy decoding
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
        print('Error {error} ignoring any errors'.format(error=e))
        csv_read = open(file, "r", errors='ignore')
    try:
        data_row = __csv.DictReader(csv_read, dialect="excel")
        # rows are numbered from 1, matching the original loop counter
        return {index: row for index, row in enumerate(data_row, start=1)}
    finally:
        # close the handle even if csv parsing raises (the original leaked it)
        csv_read.close()
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Export a dictionary of rows to a csv file.

    Args:
        orig_dict: Dict whose values are row dicts keyed by the field names
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple, in column order
        file_location: The location of the file, derive from the os module

    Returns:
        The file name written to
    """
    file = __os.path.join(file_location, file_name)
    # append mode and an explicit header row preserve the original behavior;
    # with guarantees the handle is closed (the original leaked it on error)
    with open(file, 'a') as csv_write:
        writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
        writer.writeheader()
        # the original looped over items() but only used the key to re-index
        # the dict — iterating values() writes the same rows in the same order
        for row in orig_dict.values():
            writer.writerow(row)
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
"""
Function to check for a key in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: returns true or false
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
exists = shelve_store.get(save_key)
shelve_store.close()
if exists:
return True
elif not exists:
return False
def get_keys_from_shelve(file_name, file_location):
"""
Function to retreive all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
"""
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
"""
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
"""
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
def list_directories_in_directory(full_directory_path):
"""
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
"""
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split sting
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every string in a list on whitespace, keeping the whitespace runs
    so each line can later be rejoined exactly.

    :param orig_list: Original list of strings
    :return:
        A list of split lines (each a list alternating chunks and whitespace)
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_data(line_count=1, chars_per_line=80):
"""
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
"""
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
"""
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
"""
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
random_data
|
python
|
def random_data(line_count=1, chars_per_line=80):
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
|
Function to creates lines of random string data
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L475-L487
| null |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retreive key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name))
def file_name_increase(file_name, file_location):
"""
Function to increase a filename by a number 1
Args:
file_name: The name of file to check
file_location: The location of the file, derive from the os module
Returns: returns a good filename.
"""
add_one = 1
file_name_temp = file_name
while verify_file_exists(file_name_temp, file_location):
try:
name, file_extension = file_name.split('.')
file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
except Exception as e:
LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))
name = file_name
file_name_temp = '%s-%i' % (name, add_one)
add_one += 1
file_name = file_name_temp
return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
"""
Function to verify if a directory exists
Args:
directory_name: The name of directory to check
directory_location: The location of the directory, derive from the os module
directory_create: If you want to create the directory
Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
"""
if not directory_create:
return __os.path.exists(__os.path.join(directory_location, directory_name))
elif directory_create:
good = __os.path.exists(__os.path.join(directory_location, directory_name))
if not good:
__os.mkdir(__os.path.join(directory_location, directory_name))
def list_to_file(orig_list, file_name, file_location):
"""
Function to export a list to a text file
Args:
orig_list: The list you want exported
file_name: The name of the exported file
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open(file, "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
def file_to_list(file_name, file_location):
"""
Function to import a text file to a list
Args:
file_name: The name of file to be import
file_location: The location of the file, derive from the os module
Returns: returns a list
"""
file = __os.path.join(file_location, file_name)
read_file = open(file, "r")
temp_list = read_file.read().splitlines()
read_file.close()
return temp_list
def csv_to_dict(file_name, file_location):
"""
Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary
"""
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
"""
Function to export a dictionary to a csv file
Args:
orig_dict: The dictionary you want exported
file_name: The name of the exported file
field_names_tuple: The fieldnames in a tuple
file_location: The location of the file, derive from the os module
Returns: returns the filename info
"""
file = __os.path.join(file_location, file_name)
csv_write = open(file, 'a')
writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
headers = dict((n, n) for n in field_names_tuple)
writer.writerow(headers)
for dict_key, a in list(orig_dict.items()):
writer.writerow(orig_dict[dict_key])
csv_write.close()
return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
    """
    Function to check for a key in a shelve.

    Args:
        file_name: Shelve storage file name
        save_key: The name of the key the item is stored in
        file_location: The location of the file, derive from the os module

    Returns: True when the key is present, otherwise False

    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    # Test membership directly: the previous truthiness check
    # (shelve_store.get(save_key)) misreported keys whose stored value was
    # falsy (0, '', None, empty containers) as absent.
    found = save_key in shelve_store
    shelve_store.close()
    return found
def get_keys_from_shelve(file_name, file_location):
    """
    Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derive from the os module

    Returns:
        a list of the keys

    """
    db = __shelve.open(__os.path.join(file_location, file_name))
    keys = [key for key in db]
    db.close()
    return keys
def remove_spaces(string_item):
    """
    Remove all whitespace from a string.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string without any spaces

    """
    return ''.join(string_item.split())
def remove_spaces_add_hyphen(string_item):
    """
    Replace every run of whitespace in a string with a single hyphen.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with spaces replaced with a hyphen

    """
    return '-'.join(string_item.split())
def remove_extra_spaces(string_item):
    """
    Collapse every run of whitespace in a string down to a single space.

    Args:
        string_item: String that you want to remove spaces from

    Returns: returns a string with single spacing

    """
    return ' '.join(string_item.split())
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """
    Replace every occurrence of one symbol in a string with another.

    Args:
        string_item: String that you want to replace symbols in
        remove_symbol: Symbol to remove
        add_symbol: Symbol to add

    Returns: returns a string with symbols swapped

    """
    pieces = string_item.split(remove_symbol)
    return add_symbol.join(pieces)
def list_files_in_directory(full_directory_path):
    """
    List the plain files (not sub-directories) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of files

    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, entry))]
def list_directories_in_directory(full_directory_path):
    """
    List the sub-directories (not plain files) in a directory.

    Args:
        full_directory_path: The full directory path to check, derive from the os module

    Returns: returns a list of directories

    """
    return [entry for entry in __os.listdir(full_directory_path)
            if __os.path.isdir(__os.path.join(full_directory_path, entry))]
def split_string_retain_spaces(string):
    """
    Split a string while keeping the whitespace runs so it can be rejoined.

    :param string: A String
    :return:
        A list of tokens interleaved with the whitespace that separated them
    """
    whitespace_runs = __re.compile(r'(\s+)')
    return whitespace_runs.split(string)
def join_split_string(split_string):
    """
    Reassemble a list produced by split_string_retain_spaces into one string.

    :param split_string: A Split String
    :return:
        A joined string
    """
    joined = ''.join(split_string)
    return joined
def split_strings_in_list_retain_spaces(orig_list):
    """
    Split every line in a list while keeping whitespace runs for a rejoin.

    :param orig_list: Original list
    :return:
        A List with split lines
    """
    return [__re.split(r'(\s+)', line) for line in orig_list]
def random_line_data(chars_per_line=80):
    """
    Build one line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A String

    """
    alphabet = __string.ascii_letters
    picks = (__random.choice(alphabet) for _ in range(chars_per_line))
    return ''.join(picks)
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
    """
    Function to collect files and make a zip file
    :param dir_list: A list of directories
    :param output_dir: The output directory
    :param zip_file_name: Zip file name (must contain exactly one '.'; the
        extension is forced to .zip)
    :param file_extension_list: A list of extensions of files to find
    :param file_name_list: A list of file names to find
    :return:
        Outputs a zip file

    Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
    """
    temp_list = list()
    # Validate every entry up front so a bad directory fails fast.
    if isinstance(dir_list, list):
        for dir_name in dir_list:
            if not __os.path.isdir(dir_name):
                error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
                LOGGER.critical(error)
                raise Exception(error)
    else:
        error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
        LOGGER.critical(error)
        raise TypeError(error)
    if not file_extension_list and not file_name_list:
        # No filters supplied: collect every file in every directory.
        for dir_name in dir_list:
            temp_files_list = list_files_in_directory(dir_name)
            for file_name in temp_files_list:
                temp_list.append(__os.path.join(dir_name, file_name))
    if file_extension_list:
        if isinstance(file_extension_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    # rsplit tolerates names with zero or several dots; the
                    # previous two-way str.split('.') raised ValueError on
                    # names like 'README' or 'archive.tar.gz'.
                    extension = file_name.rsplit('.', 1)[-1] if '.' in file_name else ''
                    if extension in file_extension_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
                    'list but received a {}'.format(type(file_extension_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if file_name_list:
        if isinstance(file_name_list, list):
            for dir_name in dir_list:
                temp_files_list = list_files_in_directory(dir_name)
                for file_name in temp_files_list:
                    if file_name in file_name_list:
                        temp_list.append(__os.path.join(dir_name, file_name))
        else:
            error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
                    'received a {}'.format(type(file_name_list))
            LOGGER.critical(error)
            raise TypeError(error)
    if len(zip_file_name.split('.')) == 2:
        name, ext = zip_file_name.split('.')
        if ext != 'zip':
            LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
            zip_file_name = '{}.{}'.format(name, 'zip')
    else:
        error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
                'but received {}'.format(zip_file_name)
        LOGGER.critical(error)
        raise NameError(error)
    # The context manager closes the archive; the old explicit close() inside
    # the with-block was redundant.
    with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
        for file in temp_list:
            the_zip_file.write(file)
|
btr1975/persistentdatatools
|
persistentdatatools/persistentdatatools.py
|
collect_and_zip_files
|
python
|
def collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None):
temp_list = list()
if isinstance(dir_list, list):
for dir_name in dir_list:
if not __os.path.isdir(dir_name):
error = 'Function collect_and_zip_files received an item that is not a directory {}'.format(dir_name)
LOGGER.critical(error)
raise Exception(error)
else:
error = 'Function collect_and_zip_files expected dir_list to be a list but received a {}'.format(type(dir_list))
LOGGER.critical(error)
raise TypeError(error)
if not file_extension_list and not file_name_list:
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
temp_list.append(__os.path.join(dir_name, file_name))
if file_extension_list:
if isinstance(file_extension_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
garbage, extension = file_name.split('.')
if extension in file_extension_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_extension_list to be a ' \
'list but received a {}'.format(type(file_extension_list))
LOGGER.critical(error)
raise TypeError(error)
if file_name_list:
if isinstance(file_name_list, list):
for dir_name in dir_list:
temp_files_list = list_files_in_directory(dir_name)
for file_name in temp_files_list:
if file_name in file_name_list:
temp_list.append(__os.path.join(dir_name, file_name))
else:
error = 'Function collect_and_zip_files expected file_name_list to be a list but ' \
'received a {}'.format(type(file_name_list))
LOGGER.critical(error)
raise TypeError(error)
if len(zip_file_name.split('.')) == 2:
name, ext = zip_file_name.split('.')
if ext != 'zip':
LOGGER.warning('Changed the extension of zip_file_name={} to be zip'.format(zip_file_name))
zip_file_name = '{}.{}'.format(name, 'zip')
else:
error = 'Function collect_and_zip_files expected zip_file_name to only contain one . ' \
'but received {}'.format(zip_file_name)
LOGGER.critical(error)
raise NameError(error)
with __zipfile.ZipFile(__os.path.join(output_dir, zip_file_name), 'a') as the_zip_file:
for file in temp_list:
the_zip_file.write(file)
the_zip_file.close()
|
Function to collect files and make a zip file
:param dir_list: A list of directories
:param output_dir: The output directory
:param zip_file_name: Zip file name
:param file_extension_list: A list of extensions of files to find
:param file_name_list: A list of file names to find
:return:
Outputs a zip file
Note: If no file_extension_list and file_name_list are provided it will zip the entire directory.
|
train
|
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L490-L569
|
[
"def list_files_in_directory(full_directory_path):\n \"\"\"\n List the files in a specified directory\n Args:\n full_directory_path: The full directory path to check, derive from the os module\n\n Returns: returns a list of files\n\n \"\"\"\n files = list()\n for file_name in __os.listdir(full_directory_path):\n if __os.path.isfile(__os.path.join(full_directory_path, file_name)):\n files.append(file_name)\n return files\n"
] |
#!/usr/bin/env python3
##########################################################
# Script Name: modPersistentDataTools.py #
# Script Type: Python #
# Updated By: Benjamin P. Trachtenberg #
# Date Written 9/17/2015 #
# #
# Description: #
# Collection of tools for IP Address's #
# #
##########################################################
import logging
import shelve as __shelve
import csv as __csv
import os as __os
import sys as __sys
import re as __re
import random as __random
import string as __string
import zipfile as __zipfile
__author__ = 'Benjamin P. Trachtenberg'
__copyright__ = "Copyright (c) 2016, Benjamin P. Trachtenberg"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (2, 2, 9)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = 'Benjamin P. Trachtenberg'
__email__ = 'e_ben_75-python@yahoo.com'
LOGGER = logging.getLogger(__name__)
"""
Functions included in v2.0.0
list_to_file(orig_list, file_name, file_location)
file_to_list(file_name, file_location)
csv_to_dict(file_name, file_location)
store_object(file_name, save_key, file_location, object_to_store=None)
retrieve_object_from_file(file_name, save_key, file_location)
delete_object_from_file(file_name, save_key, file_location)
verify_key_in_shelve(file_name, save_key, file_location)
remove_spaces(string_item)
remove_spaces_add_hyphen(string_item)
remove_extra_spaces(string_item)
verify_file_exists(file_name, file_location)
verify_directory(directory_name, directory_location, directory_create=False)
file_name_increase(file_name, file_location)
dict_to_csv(orig_dict, file_name, field_names_tuple, file_location)
remove_symbol_add_symbol(string_item, remove_symbol, add_symbol)
list_files_in_directory(full_directory_path)
Functions included in v2.2.2
get_keys_from_shelve(file_name, file_location)
Update to Functions in v2.2.5
retrieve_object_from_file
Uses get to retrieve key now, will not throw exception if it doesn't exist
verify_key_in_shelve
Uses get to retrieve key now, will still return True, or False
Functions included in v2.2.5
split_string_retain_spaces(string)
split_strings_in_list_retain_spaces(orig_list)
join_split_string(split_string)
Functions included in v2.2.6
random_line_data(chars_per_line=80)
random_data(line_count=1, chars_per_line=80)
Functions included in v2.2.9
collect_and_zip_files(dir_list, output_dir, zip_file_name, file_extension_list=None, file_name_list=None)
"""
def verify_file_exists(file_name, file_location):
    """
    Check whether a plain file exists at the given location.

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns boolean True or False

    """
    candidate = __os.path.join(file_location, file_name)
    return __os.path.isfile(candidate)
def file_name_increase(file_name, file_location):
    """
    Function to increase a filename by a number 1

    Args:
        file_name: The name of file to check
        file_location: The location of the file, derive from the os module

    Returns: returns a good filename.

    """
    add_one = 1
    file_name_temp = file_name
    while verify_file_exists(file_name_temp, file_location):
        if '.' in file_name:
            # rsplit keeps multi-dot names intact ('a.b.txt' -> 'a.b-1.txt');
            # the old two-way split('.') raised ValueError for them, and the
            # broad except then appended the counter after the extension.
            name, file_extension = file_name.rsplit('.', 1)
            file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)
        else:
            file_name_temp = '%s-%i' % (file_name, add_one)
        add_one += 1
        file_name = file_name_temp
    return file_name
def verify_directory(directory_name, directory_location, directory_create=False):
    """
    Check for (and optionally create) a directory.

    Args:
        directory_name: The name of directory to check
        directory_location: The location of the directory, derive from the os module
        directory_create: If you want to create the directory

    Returns: boolean True or False when directory_create is False; when
        directory_create is True the directory is created if missing and
        None is returned (historical behavior, preserved for callers).

    """
    target = __os.path.join(directory_location, directory_name)
    if directory_create:
        if not __os.path.exists(target):
            __os.mkdir(target)
        return None
    return __os.path.exists(target)
def list_to_file(orig_list, file_name, file_location):
    """
    Append every item of a list to a text file, one item per line.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info

    """
    target = __os.path.join(file_location, file_name)
    # Append mode so repeated exports accumulate, matching prior behavior.
    output = open(target, "a")
    for entry in orig_list:
        output.write('%s\n' % (str(entry),))
    output.close()
    return file_name
def file_to_list(file_name, file_location):
    """
    Read a text file into a list of its lines (no newline characters).

    Args:
        file_name: The name of file to be import
        file_location: The location of the file, derive from the os module

    Returns: returns a list

    """
    with open(__os.path.join(file_location, file_name), "r") as source:
        return source.read().splitlines()
def csv_to_dict(file_name, file_location):
    """
    Import a csv file as a dictionary of rows keyed by 1-based row number.

    Args:
        file_name: The name of the csv file
        file_location: The location of the file, derive from the os module

    Returns: returns a dictionary

    """
    source_path = __os.path.join(file_location, file_name)
    try:
        csv_read = open(source_path, "r")
    except Exception as err:
        # Fall back to ignoring bad bytes rather than failing the import.
        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=err))
        print('Error {error} ignoring any errors'.format(error=err))
        csv_read = open(source_path, "r", errors='ignore')
    temp_dict = dict()
    for row_number, row in enumerate(__csv.DictReader(csv_read, dialect="excel"), start=1):
        temp_dict[row_number] = row
    csv_read.close()
    return temp_dict
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    """
    Append a dictionary of row-dicts to a csv file, header first.

    Args:
        orig_dict: The dictionary you want exported
        file_name: The name of the exported file
        field_names_tuple: The fieldnames in a tuple
        file_location: The location of the file, derive from the os module

    Returns: returns the filename info

    """
    target = __os.path.join(file_location, file_name)
    csv_write = open(target, 'a')
    writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
    # Header row maps each field name to itself.
    writer.writerow({name: name for name in field_names_tuple})
    for row_key in list(orig_dict):
        writer.writerow(orig_dict[row_key])
    csv_write.close()
    return file_name
def store_object(file_name, save_key, file_location, object_to_store=None):
"""
Function to store objects in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key to store the item to
file_location: The location of the file, derive from the os module
object_to_store: The object you want to store
Returns:
"""
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))
print('Bad storage dB, rebuilding!!')
__os.remove(file)
shelve_store = __shelve.open(file)
shelve_store[save_key] = object_to_store
shelve_store.close()
def retrieve_object_from_file(file_name, save_key, file_location):
"""
Function to retrieve objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: Returns the stored object
"""
shelve_store = None
file = __os.path.join(file_location, file_name)
try:
shelve_store = __shelve.open(file)
except Exception as e:
LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
__sys.exit('Storage dB is not readable, closing App!!')
stored_object = shelve_store.get(save_key)
shelve_store.close()
return stored_object
def delete_object_from_file(file_name, save_key, file_location):
"""
Function to delete objects from a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
del shelve_store[save_key]
shelve_store.close()
def verify_key_in_shelve(file_name, save_key, file_location):
"""
Function to check for a key in a shelve
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns: returns true or false
"""
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
exists = shelve_store.get(save_key)
shelve_store.close()
if exists:
return True
elif not exists:
return False
def get_keys_from_shelve(file_name, file_location):
"""
Function to retrieve all keys in a shelve
Args:
file_name: Shelve storage file name
file_location: The location of the file, derive from the os module
Returns:
a list of the keys
"""
temp_list = list()
file = __os.path.join(file_location, file_name)
shelve_store = __shelve.open(file)
for key in shelve_store:
temp_list.append(key)
shelve_store.close()
return temp_list
def remove_spaces(string_item):
"""
Remove all spaces from a string
Args:
string_item: String that you want to remove spaces from
Returns: returns a string without any spaces
"""
string_item = ''.join(string_item.split())
return string_item
def remove_spaces_add_hyphen(string_item):
"""
Remove all spaces from a string and replace them with a hyphen
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with spaces replaced with a hyphen
"""
string_item = '-'.join(string_item.split())
return string_item
def remove_extra_spaces(string_item):
"""
Remove all extra spaces from a string leaving single spaces
Args:
string_item: String that you want to remove spaces from
Returns: returns a string with single spacing
"""
string_item = ' '.join(string_item.split())
return string_item
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
"""
Remove a symbol from a string, and replace it with a different one
Args:
string_item: String that you want to replace symbols in
remove_symbol: Symbol to remove
add_symbol: Symbol to add
Returns: returns a string with symbols swapped
"""
string_item = add_symbol.join(string_item.split(remove_symbol))
return string_item
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files
def list_directories_in_directory(full_directory_path):
"""
List the directories in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of directories
"""
directories = list()
for directory_name in __os.listdir(full_directory_path):
if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):
directories.append(directory_name)
return directories
def split_string_retain_spaces(string):
"""
Function to split a string, and retain spaces to rejoin
:param string: A String
:return:
A split string
"""
return __re.split(r'(\s+)', string)
def join_split_string(split_string):
"""
Function to join a split string
:param split_string: A Split String
:return:
A joined string
"""
return ''.join(split_string)
def split_strings_in_list_retain_spaces(orig_list):
"""
Function to split every line in a list, and retain spaces for a rejoin
:param orig_list: Original list
:return:
A List with split lines
"""
temp_list = list()
for line in orig_list:
line_split = __re.split(r'(\s+)', line)
temp_list.append(line_split)
return temp_list
def random_line_data(chars_per_line=80):
"""
Function to create a line of a random string
Args:
chars_per_line: An integer that says how many characters to return
Returns:
A String
"""
return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
def random_data(line_count=1, chars_per_line=80):
    """
    Build several lines of random string data, newline separated.

    Args:
        line_count: An integer that says how many lines to return
        chars_per_line: An integer that says how many characters per line to return

    Returns:
        A String

    """
    total_chars = chars_per_line * line_count
    # Preserves the original computation (raises ZeroDivisionError when
    # chars_per_line is 0), which reduces to line_count lines.
    line_total = int(total_chars / chars_per_line)
    return '\n'.join(random_line_data(chars_per_line) for _ in range(line_total))
|
racker/torment
|
torment/contexts/docker/compose.py
|
up
|
python
|
def up(services: Iterable[str] = ()) -> int:
'''Start the specified docker-compose services.
Parameters
----------
:``services``: a list of docker-compose service names to start (must be
defined in docker-compose.yml)
Return Value(s)
---------------
The integer status of ``docker-compose up``.
'''
services = list(services)
if not len(services):
raise ValueError('empty iterable passed to up(): {0}'.format(services))
return _call('docker-compose up --no-color -d ' + ' '.join(services), shell = True)
|
Start the specified docker-compose services.
Parameters
----------
:``services``: a list of docker-compose service names to start (must be
defined in docker-compose.yml)
Return Value(s)
---------------
The integer status of ``docker-compose up``.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/contexts/docker/compose.py#L54-L75
|
[
"def _call(command: str, *args, **kwargs) -> int:\n '''Wrapper around ``subprocess.Popen`` that sends command output to logger.\n\n .. seealso::\n\n ``subprocess.Popen``_\n\n Parameters\n ----------\n\n :``command``: string form of the command to execute\n\n All other parameters are passed directly to ``subprocess.Popen``.\n\n Return Value(s)\n ---------------\n\n The integer status of command.\n\n '''\n\n child = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, *args, **kwargs)\n\n def log():\n '''Send processes stdout and stderr to logger.'''\n\n for fh in select.select(( child.stdout, child.stderr, ), (), (), 0)[0]:\n line = fh.readline()[:-1]\n\n if len(line):\n getattr(logger, {\n child.stdout: 'debug',\n child.stderr: 'error',\n }[fh])('%s: %s', command, line)\n\n while child.poll() is None:\n log()\n\n log()\n\n return child.wait()\n"
] |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import select
import subprocess
import typing # noqa (use mypy typing)
from typing import Iterable
logger = logging.getLogger(__name__)
def found() -> bool:
    '''Determines if docker-compose is available as a shell command.

    Not only determines if docker-compose is available in the shell's PATH but
    also ensures that docker-compose can successfully stop all services.

    Return Value(s)
    ---------------

    True if docker-compose is available and functional; otherwise, False.

    '''

    # Short-circuit: do not attempt to stop services when the binary is absent.
    if _call('which docker-compose', shell = True) != 0:
        return False

    return stop() == 0
def stop() -> int:
    '''Stop all docker-compose services.

    Return Value(s)
    ---------------

    The integer status of ``docker-compose stop``.

    '''

    command = 'docker-compose stop'
    return _call(command, shell = True)
def _call(command: str, *args, **kwargs) -> int:
    '''Wrapper around ``subprocess.Popen`` that sends command output to logger.

    .. seealso::

        ``subprocess.Popen``_

    Parameters
    ----------

    :``command``: string form of the command to execute

    All other parameters are passed directly to ``subprocess.Popen``.

    Return Value(s)
    ---------------

    The integer status of command.

    '''

    # Both streams are captured so they can be forwarded to the logger below.
    # NOTE(review): if the child writes faster than log() drains a pipe,
    # Popen with PIPE can block the child -- presumably output volume is
    # small here; confirm for long-running commands.
    child = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, *args, **kwargs)

    def log():
        '''Send processes stdout and stderr to logger.'''

        # Zero-timeout select: drain only the streams that are ready now.
        for fh in select.select(( child.stdout, child.stderr, ), (), (), 0)[0]:
            line = fh.readline()[:-1]  # drop the trailing newline

            if len(line):
                # stdout lines go to debug, stderr lines to error.
                getattr(logger, {
                    child.stdout: 'debug',
                    child.stderr: 'error',
                }[fh])('%s: %s', command, line)

    # Busy-poll until the child exits, forwarding output as it appears.
    while child.poll() is None:
        log()

    # Final drain for anything written between the last poll and exit.
    log()

    return child.wait()
|
racker/torment
|
torment/contexts/docker/compose.py
|
_call
|
python
|
def _call(command: str, *args, **kwargs) -> int:
'''Wrapper around ``subprocess.Popen`` that sends command output to logger.
.. seealso::
``subprocess.Popen``_
Parameters
----------
:``command``: string form of the command to execute
All other parameters are passed directly to ``subprocess.Popen``.
Return Value(s)
---------------
The integer status of command.
'''
child = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, *args, **kwargs)
def log():
'''Send processes stdout and stderr to logger.'''
for fh in select.select(( child.stdout, child.stderr, ), (), (), 0)[0]:
line = fh.readline()[:-1]
if len(line):
getattr(logger, {
child.stdout: 'debug',
child.stderr: 'error',
}[fh])('%s: %s', command, line)
while child.poll() is None:
log()
log()
return child.wait()
|
Wrapper around ``subprocess.Popen`` that sends command output to logger.
.. seealso::
``subprocess.Popen``_
Parameters
----------
:``command``: string form of the command to execute
All other parameters are passed directly to ``subprocess.Popen``.
Return Value(s)
---------------
The integer status of command.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/contexts/docker/compose.py#L78-L118
|
[
"def log():\n '''Send processes stdout and stderr to logger.'''\n\n for fh in select.select(( child.stdout, child.stderr, ), (), (), 0)[0]:\n line = fh.readline()[:-1]\n\n if len(line):\n getattr(logger, {\n child.stdout: 'debug',\n child.stderr: 'error',\n }[fh])('%s: %s', command, line)\n"
] |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import select
import subprocess
import typing # noqa (use mypy typing)
from typing import Iterable
logger = logging.getLogger(__name__)
def found() -> bool:
'''Determines if docker-compose is available as a shell command.
Not only determines if docker-compose is available in the shell's PATH but
also ensures that docker-compose can successfully stop all services.
Return Value(s)
---------------
True if docker-compose is available and functional; otherwise, False.
'''
return 0 == _call('which docker-compose', shell = True) and 0 == stop()
def stop() -> int:
'''Stop all docker-compose services.
Return Value(s)
---------------
The integer status of ``docker-compose stop``.
'''
return _call('docker-compose stop', shell = True)
def up(services: Iterable[str] = ()) -> int:
    '''Start the specified docker-compose services.

    Parameters
    ----------

    :``services``: a list of docker-compose service names to start (must be
                   defined in docker-compose.yml)

    Return Value(s)
    ---------------

    The integer status of ``docker-compose up``.

    '''

    services = list(services)

    if len(services) == 0:
        raise ValueError('empty iterable passed to up(): {0}'.format(services))

    command = 'docker-compose up --no-color -d ' + ' '.join(services)
    return _call(command, shell = True)
|
racker/torment
|
torment/fixtures/__init__.py
|
of
|
python
|
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
    '''Obtain all Fixture objects of the provided classes.

    **Parameters**

    :``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
    :``context``:         a ``torment.TestContext`` to initialize Fixtures with

    **Return Value(s)**

    Instantiated ``torment.fixtures.Fixture`` objects for each individual
    fixture class that inherits from one of the provided classes.

    '''

    pending = list(copy.copy(fixture_classes))
    instances = []  # type: Iterable[torment.fixtures.Fixture]

    # Walk the subclass tree: only leaf classes (no further subclasses) that
    # are not themselves in the requested set get instantiated.
    while pending:
        candidate = pending.pop()
        children = candidate.__subclasses__()

        if children:
            pending.extend(children)
        elif candidate not in fixture_classes:
            instances.append(candidate(context))

    return instances
|
Obtain all Fixture objects of the provided classes.
**Parameters**
:``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
:``context``: a ``torment.TestContext`` to initialize Fixtures with
**Return Value(s)**
Instantiated ``torment.fixtures.Fixture`` objects for each individual
fixture class that inherits from one of the provided classes.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L293-L320
| null |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
import logging
import os
import sys
import typing # noqa (use mypy typing)
import uuid
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Tuple
from typing import Union
from torment import decorators
logger = logging.getLogger(__name__)
class Fixture(object):
    '''Collection of data and actions for a particular test case.

    Intended as a base class for custom fixtures.  Fixture provides an API
    that simplifies writing scalable test cases.

    Creating Fixture objects is broken into two parts.  This keeps the logic for
    a class of test cases separate from the data for particular cases while
    allowing re-use of the data provided by a fixture.

    The first part of Fixture object creation is crafting a proper subclass that
    implements the necessary actions:

    :``__init__``:   pre-data population initialization
    :``initialize``: post-data population initialization
    :``setup``:      pre-run setup
    :``run``:        REQUIRED—run code under test
    :``check``:      verify results of run

    .. note::
        ``initialize`` is run during ``__init__`` and setup is run after;
        otherwise, they serve the same function.  The split allows different
        actions to occur in different areas of the class hierarchy and generally
        isn't necessary.

    By default all actions are noops and simply do nothing but run is required.

    These actions allow complex class hierarchies to provide nuanced testing
    behavior.  For example, Fixture provides the absolute bare minimum to test
    any Fixture and no more.  By adding a set of subclasses, common
    initialization and checks can be performed at one layer while specific run
    decisions and checks can happen at a lower layer.

    The second part of Fixture object creation is crafting the data.  Tying data
    to a Fixture class should be done with ``torment.fixtures.register``.  It
    provides a declarative interface that binds a dictionary to a Fixture (keys
    of dictionary become Fixture properties).  ``torment.fixtures.register``
    creates a subclass that the rest of the torment knows how to transform into
    test cases that are compatible with nose.

    **Examples**

    Simplest Fixture subclass:

    .. code-block:: python

        class MyFixture(Fixture):
            pass

    Of course, to be useful the Fixture needs definitions of setup, run, and
    check that actually test the code we're interested in checking:

    .. code-block:: python

        def add(x, y):
            return x + y

        class AddFixture(Fixture):
            def run(self):
                self.result = add(self.parameters['x'], self.parameters['y'])

            def check(self):
                self.context.assertEqual(self.result, self.expected)

    This fixture uses a couple of conventions (not requirements):

    #. ``self.parameters`` as a dictionary of parameter names to values
    #. ``self.expected`` as the value we expect as a result
    #. ``self.result`` as the holder inside the fixture between ``run`` and
       ``check``

    This show-cases the absurdity of using this testing framework for simple
    functions that have few cases that require testing.  This framework is
    designed to allow many cases to be easily and declaratively defined.

    The last component required to get these fixtures to actually run is hooking
    them together with a context:

    .. code-block:: python

        from torment import contexts

        class AddUnitTest(contexts.TestContext, metaclass = contexts.MetaContext):
            fixture_classes = (
                MyFixture,
                AddFixture,
            )

    The context that wraps a Fixture subclass should eventually inherit from
    TestContext (which inherits from ``unittest.TestCase`` and provides its assert
    methods).  In order for nose to find and execute this ``TestContext``, it
    must have a name that contains Test.

    **Properties**

    * ``category``
    * ``description`` (override)
    * ``name`` (do **not** override)

    **Methods To Override**

    * ``__init__``
    * ``check``
    * ``initialize``
    * ``run (required)``
    * ``setup``

    **Instance Variables**

    :``context``: the ``torment.TestContext`` this case is running in which
                  provides the assertion methods of ``unittest.TestCase``.

    '''

    def __init__(self, context: 'torment.TestContext') -> None:
        '''Create Fixture

        Initializes the Fixture's context (can be changed like any other
        property).

        **Parameters**

        :``context``: a subclass of ``torment.TestContext`` that provides
                      assertion methods and any other environmental information
                      for this test case

        '''

        self.context = context

    @property
    def category(self) -> str:
        '''Fixture's category (the containing testing module name)

        **Examples**

        :module:   test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
        :category: fixtures

        '''

        logger.debug('dir(self.__module__): %s', dir(self.__module__))

        # NOTE(review): ``__module__`` is normally a *string*, which has no
        # ``__name__``—this appears to work because ``register`` stores the
        # caller's module object under ``__module__``.  Confirm before reusing
        # this property outside of registered fixtures.
        return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')

    @property
    def description(self) -> str:
        '''Test name in nose output (intended to be overridden).'''

        return '{0.uuid.hex}—{1}'.format(self, self.context.module)

    @property
    def name(self) -> str:
        '''Method name in nose runtime.'''

        return 'test_' + self.__class__.__name__

    def initialize(self) -> None:
        '''Post-data population initialization hook.

        .. note::
            Override as necessary.  Default provided so re-definition is not
            necessary.

        Called during ``__init__`` and after properties have been populated by
        ``torment.fixtures.register``.

        '''

        pass

    def setup(self) -> None:
        '''Pre-run initialization hook.

        .. note::
            Override as necessary.  Default provided so re-definition is not
            necessary.

        Called after properties have been populated by
        ``torment.fixtures.register``.

        '''

        pass

    def check(self) -> None:
        '''Check that run ran as expected.

        .. note::
            Override as necessary.  Default provided so re-definition is not
            necessary.

        Called after ``run`` and should be used to verify that run performed the
        expected actions.

        '''

        pass

    def _execute(self) -> None:
        '''Run Fixture actions (setup, run, check).

        Core test loop for Fixture.  Executes setup, run, and check in order.

        '''

        # Surface any property-resolution failure recorded by
        # ``_resolve_functions`` before running, so the original cause is not
        # hidden behind a confusing downstream error.
        if hasattr(self, '_last_resolver_exception'):
            logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])

        self.setup()
        self.run()
        self.check()
class ErrorFixture(Fixture):
    '''Common error checking for Fixture.

    Intended as a mixin when registering a new Fixture (via register) that will
    check an error case (one throwing an exception).

    **Examples**

    Using the AddFixture from the Examples in Fixture, we can create a Fixture
    that handles (an obviously contrived) exception by either crafting a new
    Fixture object or invoking register with the appropriate base classes.

    New Fixture Object:

    .. code-block:: python

        class ErrorAddFixture(ErrorFixture, AddFixture):
            pass

    Via call to register:

    .. code-block:: python

        register(globals(), ( ErrorFixture, AddFixture, ), { … })

    '''

    @property
    def description(self) -> str:
        '''Test name in nose output (adds error reason as result portion).'''

        return super().description + ' → {0.error}'.format(self)

    def run(self) -> None:
        '''Calls sibling with exception expectation.'''

        # Expect the MRO sibling's ``run`` to raise the same type as
        # ``self.error`` (built by ``register`` from the 'error' property);
        # keep the raised exception around for inspection in ``check``.
        with self.context.assertRaises(self.error.__class__) as error:
            super().run()

        self.exception = error.exception
@decorators.log
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
    '''Register a Fixture class in namespace with the given properties.

    Creates a Fixture class (not object) and inserts it into the provided
    namespace.  The properties is a dict but allows functions to reference other
    properties and acts like a small DSL (domain specific language).  This is
    really just a declarative way to compose data about a test fixture and make
    it repeatable.

    Files calling this function are expected to house one or more Fixtures and
    have a name that ends with a UUID without its hyphens.  For example:
    foo_38de9ceec5694c96ace90c9ca37e5bcb.py.  This UUID is used to uniquely
    track the Fixture through the test suite and allow Fixtures to scale without
    concern.

    **Parameters**

    :``namespace``:    dictionary to insert the generated class into
    :``base_classes``: list of classes the new class should inherit
    :``properties``:   dictionary of properties with their values

    Properties can have the following forms:

    :functions: invoked with the Fixture as it's argument
    :classes:   instantiated without any arguments (unless it subclasses
                ``torment.fixtures.Fixture`` in which case it's passed context)
    :literals:  any standard python type (i.e. int, str, dict)

    .. note::
        function execution may error (this will be emitted as a logging event).
        functions will continually be tried until they resolve or the same set
        of functions is continually erroring.  These functions that failed to
        resolve are left intact for later processing.

    Properties by the following names also have defined behavior:

    :description: added to the Fixture's description as an addendum
    :error:       must be a dictionary with three keys:

                  :class:  class to instantiate (usually an exception)
                  :args:   arguments to pass to class initialization
                  :kwargs: keyword arguments to pass to class initialization

    :mocks: dictionary mapping mock symbols to corresponding values

    Properties by the following names are reserved and should not be used:

    * name

    '''

    # ensure we have a clean copy of the data
    # and won't stomp on re-uses elsewhere in
    # someone's code
    props = copy.deepcopy(properties)

    desc = props.pop('description', None)  # type: Union[str, None]

    # Recover the registering module: its filename is expected to end in a
    # hyphen-stripped UUID (e.g. foo_38de9ceec5694c96ace90c9ca37e5bcb.py),
    # which we pull off the caller's stack frame.
    caller_frame = inspect.stack()[1]
    caller_file = caller_frame[1]
    caller_module = inspect.getmodule(caller_frame[0])

    my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])

    class_name = _unique_class_name(namespace, my_uuid)

    @property
    def description(self) -> str:
        # Start from the inherited description and append the registered
        # 'description' addendum (if one was supplied).
        _ = super(self.__class__, self).description

        if desc is not None:
            _ += '—' + desc

        return _

    def __init__(self, context: 'torment.TestContext') -> None:
        # NOTE(review): ``super(self.__class__, self)`` recurses infinitely if
        # the generated class is ever subclassed—apparently safe here because
        # generated classes are leaves; confirm before subclassing them.
        super(self.__class__, self).__init__(context)

        functions = {}

        for name, value in props.items():
            if name == 'error':
                # Build the expected exception object from its spec dict
                # ({'class': ..., 'args': ..., 'kwargs': ...}).
                self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
                continue

            if inspect.isclass(value):
                # Classes get instantiated; Fixture subclasses receive the
                # running context, everything else is constructed bare.
                if issubclass(value, Fixture):
                    value = value(self.context)
                else:
                    value = value()

            if inspect.isfunction(value):
                # Defer functions—they may reference properties that have not
                # been set yet; _resolve_functions retries until stable.
                functions[name] = value
                continue

            setattr(self, name, value)

        _resolve_functions(functions, self)

        self.initialize()

    def setup(self) -> None:
        if hasattr(self, 'mocks'):
            logger.debug('self.mocks: %s', self.mocks)

            for mock_symbol, mock_result in self.mocks.items():
                # Only configure the mock when the context actually mocked the
                # symbol (the mocker method returns True on success).
                if _find_mocker(mock_symbol, self.context)():
                    _prepare_mock(self.context, mock_symbol, **mock_result)

        super(self.__class__, self).setup()

    # Assemble the new Fixture subclass dynamically and publish it under a
    # namespace-unique name derived from the caller's UUID.
    namespace[class_name] = type(class_name, base_classes, {
        'description': description,
        '__init__': __init__,
        '__module__': caller_module,
        'setup': setup,
        'uuid': my_uuid,
    })
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
    '''Sets return value or side effect of symbol's mock in context.

    .. seealso:: :py:func:`_find_mocker`

    **Parameters**

    :``context``:      the search context
    :``symbol``:       the symbol to be located
    :``return_value``: pass through to mock ``return_value``
    :``side_effect``:  pass through to mock ``side_effect``

    '''

    methods = symbol.split('.')
    index = len(methods)

    mock = None

    # Longest-prefix search: for symbol 'a.b.c' try context.mocked_a_b_c,
    # then context.mocked_a_b, then context.mocked_a, stopping at the first
    # attribute that exists.
    while index > 0:
        name = 'mocked_' + '_'.join(methods[:index]).lower()
        logger.debug('name: %s', name)

        if hasattr(context, name):
            mock = getattr(context, name)
            break

        index -= 1

    logger.debug('mock: %s', mock)

    if mock is not None:
        # Walk any remaining attribute path (the unmatched suffix of the
        # symbol) down from the found mock object via chained getattr.
        mock = functools.reduce(getattr, methods[index:], mock)

        logger.debug('mock: %s', mock)

        if return_value is not None:
            mock.return_value = return_value

        if side_effect is not None:
            mock.side_effect = side_effect

        # Discard any call history recorded before this preparation so tests
        # only observe calls made after setup.
        mock.reset_mock()
def _find_mocker(symbol: str, context: 'torment.contexts.TestContext') -> Callable[[], bool]:
'''Find method within the context that mocks symbol.
Given a symbol (i.e. ``tornado.httpclient.AsyncHTTPClient.fetch``), find
the shortest ``mock_`` method that resembles the symbol. Resembles means
the lowercased and periods replaced with underscores.
If no match is found, a dummy function (only returns False) is returned.
**Parameters**
:``symbol``: the symbol to be located
:``context``: the search context
**Return Value(s)**
The method used to mock the symbol.
**Examples**
Assuming the symbol is ``tornado.httpclient.AsyncHTTPClient.fetch``, the
first of the following methods would be returned:
* ``mock_tornado``
* ``mock_tornado_httpclient``
* ``mock_tornado_httpclient_asynchttpclient``
* ``mock_tornado_httpclient_asynchttpclient_fetch``
'''
components = []
method = None
for component in symbol.split('.'):
components.append(component.lower())
name = '_'.join([ 'mock' ] + components)
if hasattr(context, name):
method = getattr(context, name)
break
if method is None:
logger.warn('no mocker for %s', symbol)
def noop(*args, **kwargs):
return False
method = noop
return method
def _resolve_functions(functions: Dict[str, Callable[[Any], Any]], fixture: Fixture) -> None:
'''Apply functions and collect values as properties on fixture.
Call functions and apply their values as properteis on fixture.
Functions will continue to get applied until no more functions resolve.
All unresolved functions are logged and the last exception to have
occurred is also logged. This function does not return but adds the
results to fixture directly.
**Parameters**
:``functions``: dict mapping function names (property names) to
callable functions
:``fixture``: Fixture to add values to
'''
exc_info = last_function = None
function_count = float('inf')
while function_count > len(functions):
function_count = len(functions)
for name, function in copy.copy(functions).items():
try:
setattr(fixture, name, copy.deepcopy(function(fixture)))
del functions[name]
except:
exc_info = sys.exc_info()
logger.debug('name: %s', name)
logger.debug('exc_info: %s', exc_info)
last_function = name
if len(functions):
logger.warning('unprocessed Fixture properties: %s', ','.join(functions.keys()))
logger.warning('last exception from %s.%s:', fixture.name, last_function, exc_info = exc_info)
setattr(fixture, '_last_resolver_exception', ( last_function, exc_info, ))
for name, function in copy.copy(functions).items():
setattr(fixture, name, function)
def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
'''Generate unique to namespace name for a class using uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid.
'''
count = 0
name = original_name = 'f_' + uuid.hex
while name in namespace:
count += 1
name = original_name + '_' + str(count)
return name
|
racker/torment
|
torment/fixtures/__init__.py
|
register
|
python
|
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
'''Register a Fixture class in namespace with the given properties.
Creates a Fixture class (not object) and inserts it into the provided
namespace. The properties is a dict but allows functions to reference other
properties and acts like a small DSL (domain specific language). This is
really just a declarative way to compose data about a test fixture and make
it repeatable.
Files calling this function are expected to house one or more Fixtures and
have a name that ends with a UUID without its hyphens. For example:
foo_38de9ceec5694c96ace90c9ca37e5bcb.py. This UUID is used to uniquely
track the Fixture through the test suite and allow Fixtures to scale without
concern.
**Parameters**
:``namespace``: dictionary to insert the generated class into
:``base_classes``: list of classes the new class should inherit
:``properties``: dictionary of properties with their values
Properties can have the following forms:
:functions: invoked with the Fixture as it's argument
:classes: instantiated without any arguments (unless it subclasses
``torment.fixtures.Fixture`` in which case it's passed context)
:literals: any standard python type (i.e. int, str, dict)
.. note::
function execution may error (this will be emitted as a logging event).
functions will continually be tried until they resolve or the same set
of functions is continually erroring. These functions that failed to
resolve are left in tact for later processing.
Properties by the following names also have defined behavior:
:description: added to the Fixture's description as an addendum
:error: must be a dictionary with three keys:
:class: class to instantiate (usually an exception)
:args: arguments to pass to class initialization
:kwargs: keyword arguments to pass to class initialization
:mocks: dictionary mapping mock symbols to corresponding values
Properties by the following names are reserved and should not be used:
* name
'''
# ensure we have a clean copy of the data
# and won't stomp on re-uses elsewhere in
# someone's code
props = copy.deepcopy(properties)
desc = props.pop('description', None) # type: Union[str, None]
caller_frame = inspect.stack()[1]
caller_file = caller_frame[1]
caller_module = inspect.getmodule(caller_frame[0])
my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])
class_name = _unique_class_name(namespace, my_uuid)
@property
def description(self) -> str:
_ = super(self.__class__, self).description
if desc is not None:
_ += '—' + desc
return _
def __init__(self, context: 'torment.TestContext') -> None:
super(self.__class__, self).__init__(context)
functions = {}
for name, value in props.items():
if name == 'error':
self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
continue
if inspect.isclass(value):
if issubclass(value, Fixture):
value = value(self.context)
else:
value = value()
if inspect.isfunction(value):
functions[name] = value
continue
setattr(self, name, value)
_resolve_functions(functions, self)
self.initialize()
def setup(self) -> None:
if hasattr(self, 'mocks'):
logger.debug('self.mocks: %s', self.mocks)
for mock_symbol, mock_result in self.mocks.items():
if _find_mocker(mock_symbol, self.context)():
_prepare_mock(self.context, mock_symbol, **mock_result)
super(self.__class__, self).setup()
namespace[class_name] = type(class_name, base_classes, {
'description': description,
'__init__': __init__,
'__module__': caller_module,
'setup': setup,
'uuid': my_uuid,
})
|
Register a Fixture class in namespace with the given properties.
Creates a Fixture class (not object) and inserts it into the provided
namespace. The properties is a dict but allows functions to reference other
properties and acts like a small DSL (domain specific language). This is
really just a declarative way to compose data about a test fixture and make
it repeatable.
Files calling this function are expected to house one or more Fixtures and
have a name that ends with a UUID without its hyphens. For example:
foo_38de9ceec5694c96ace90c9ca37e5bcb.py. This UUID is used to uniquely
track the Fixture through the test suite and allow Fixtures to scale without
concern.
**Parameters**
:``namespace``: dictionary to insert the generated class into
:``base_classes``: list of classes the new class should inherit
:``properties``: dictionary of properties with their values
Properties can have the following forms:
:functions: invoked with the Fixture as it's argument
:classes: instantiated without any arguments (unless it subclasses
``torment.fixtures.Fixture`` in which case it's passed context)
:literals: any standard python type (i.e. int, str, dict)
.. note::
function execution may error (this will be emitted as a logging event).
functions will continually be tried until they resolve or the same set
of functions is continually erroring. These functions that failed to
resolve are left in tact for later processing.
Properties by the following names also have defined behavior:
:description: added to the Fixture's description as an addendum
:error: must be a dictionary with three keys:
:class: class to instantiate (usually an exception)
:args: arguments to pass to class initialization
:kwargs: keyword arguments to pass to class initialization
:mocks: dictionary mapping mock symbols to corresponding values
Properties by the following names are reserved and should not be used:
* name
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L323-L438
|
[
"def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:\n '''Generate unique to namespace name for a class using uuid.\n\n **Parameters**\n\n :``namespace``: the namespace to verify uniqueness against\n :``uuid``: the \"unique\" portion of the name\n\n **Return Value(s)**\n\n A unique string (in namespace) using uuid.\n\n '''\n\n count = 0\n\n name = original_name = 'f_' + uuid.hex\n while name in namespace:\n count += 1\n name = original_name + '_' + str(count)\n\n return name\n"
] |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
import logging
import os
import sys
import typing # noqa (use mypy typing)
import uuid
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Tuple
from typing import Union
from torment import decorators
logger = logging.getLogger(__name__)
class Fixture(object):
'''Collection of data and actions for a particular test case.
Intended as a base class for custom fixtures. Fixture provides an API
that simplifies writing scalable test cases.
Creating Fixture objects is broken into two parts. This keeps the logic for
a class of test cases separate from the data for particular cases while
allowing re-use of the data provided by a fixture.
The first part of Fixture object creation is crafting a proper subclass that
implements the necessary actions:
:``__init__``: pre-data population initialization
:``initialize``: post-data population initialization
:``setup``: pre-run setup
:``run``: REQUIRED—run code under test
:``check``: verify results of run
.. note::
``initialize`` is run during ``__init__`` and setup is run after;
otherwise, they serve the same function. The split allows different
actions to occur in different areas of the class heirarchy and generally
isn't necessary.
By default all actions are noops and simply do nothing but run is required.
These actions allow complex class hierarchies to provide nuanced testing
behavior. For example, Fixture provides the absolute bare minimum to test
any Fixture and no more. By adding a set of subclasses, common
initialization and checks can be performed at one layer while specific run
decisions and checks can happen at a lower layer.
The second part of Fixture object creation is crafting the data. Tying data
to a Fixture class should be done with ``torment.fixtures.register``. It
provides a declarative interface that binds a dictionary to a Fixture (keys
of dictionary become Fixture properties). ``torment.fixtures.register``
creates a subclass that the rest of the torment knows how to transform into
test cases that are compatible with nose.
**Examples**
Simplest Fixture subclass:
.. code-block:: python
class MyFixture(Fixture):
pass
Of course, to be useful the Fixture needs definitions of setup, run, and
check that actually test the code we're interested in checking:
.. code-block:: python
def add(x, y):
return x + y
class AddFixture(Fixture):
def run(self):
self.result = add(self.parameters['x'], self.parameters['y'])
def check(self):
self.context.assertEqual(self.result, self.expected)
This fixture uses a couple of conventions (not requirements):
#. ``self.parameters`` as a dictionary of parameter names to values
#. ``self.expected`` as the value we expect as a result
#. ``self.result`` as the holder inside the fixture between ``run`` and
``check``
This show-cases the ridiculity of using this testing framework for simple
functions that have few cases that require testing. This framework is
designed to allow many cases to be easily and declaritively defined.
The last component required to get these fixtures to actually run is hooking
them together with a context:
.. code-block:: python
from torment import contexts
class AddUnitTest(contexts.TestContext, metaclass = contexts.MetaContext):
fixture_classes = (
MyFixture,
AddFixture,
)
The context that wraps a Fixture subclass should eventually inherit from
TestContext (which inherits from ``unittest.TestCase`` and provides its assert
methods). In order for nose to find and execute this ``TestContext``, it
must have a name that contains Test.
**Properties**
* ``category``
* ``description`` (override)
* ``name`` (do **not** override)
**Methods To Override**
* ``__init__``
* ``check``
* ``initialize``
* ``run (required)``
* ``setup``
**Instance Variables**
:``context``: the ``torment.TestContext`` this case is running in which
provides the assertion methods of ``unittest.TestCase``.
'''
def __init__(self, context: 'torment.TestContext') -> None:
'''Create Fixture
Initializes the Fixture's context (can be changed like any other
property).
**Parameters**
:``context``: a subclass of ``torment.TestContext`` that provides
assertion methods and any other environmental information
for this test case
'''
self.context = context
@property
def category(self) -> str:
'''Fixture's category (the containing testing module name)
**Examples**
:module: test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
:category: fixtures
'''
logger.debug('dir(self.__module__): %s', dir(self.__module__))
return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')
@property
def description(self) -> str:
'''Test name in nose output (intended to be overridden).'''
return '{0.uuid.hex}—{1}'.format(self, self.context.module)
@property
def name(self) -> str:
'''Method name in nose runtime.'''
return 'test_' + self.__class__.__name__
def initialize(self) -> None:
'''Post-data population initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called during ``__init__`` and after properties have been populated by
``torment.fixtures.register``.
'''
pass
def setup(self) -> None:
'''Pre-run initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after properties have been populated by
``torment.fixtures.register``.
'''
pass
def check(self) -> None:
'''Check that run ran as expected.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after ``run`` and should be used to verify that run performed the
expected actions.
'''
pass
def _execute(self) -> None:
'''Run Fixture actions (setup, run, check).
Core test loop for Fixture. Executes setup, run, and check in order.
'''
if hasattr(self, '_last_resolver_exception'):
logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])
self.setup()
self.run()
self.check()
class ErrorFixture(Fixture):
'''Common error checking for Fixture.
Intended as a mixin when registering a new Fixture (via register) that will
check an error case (one throwing an exception).
**Examples**
Using the AddFixture from the Examples in Fixture, we can create a Fixture
that handles (an obviously contrived) exception by either crafting a new
Fixture object or invoking register with the appropriate base classes.
New Fixture Object:
.. code-block:: python
class ErrorAddFixture(ErrorFixture, AddFixture):
pass
Via call to register:
.. code-block:: python
register(globals(), ( ErrorFixture, AddFixture, ), { … })
'''
@property
def description(self) -> str:
'''Test name in nose output (adds error reason as result portion).'''
return super().description + ' → {0.error}'.format(self)
def run(self) -> None:
'''Calls sibling with exception expectation.'''
with self.context.assertRaises(self.error.__class__) as error:
super().run()
self.exception = error.exception
@decorators.log
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
'''Obtain all Fixture objects of the provided classes.
**Parameters**
:``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
:``context``: a ``torment.TestContext`` to initialize Fixtures with
**Return Value(s)**
Instantiated ``torment.fixtures.Fixture`` objects for each individual
fixture class that inherits from one of the provided classes.
'''
classes = list(copy.copy(fixture_classes))
fixtures = [] # type: Iterable[torment.fixtures.Fixture]
while len(classes):
current = classes.pop()
subclasses = current.__subclasses__()
if len(subclasses):
classes.extend(subclasses)
elif current not in fixture_classes:
fixtures.append(current(context))
return fixtures
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
'''Sets return value or side effect of symbol's mock in context.
.. seealso:: :py:func:`_find_mocker`
**Parameters**
:``context``: the search context
:``symbol``: the symbol to be located
:``return_value``: pass through to mock ``return_value``
:``side_effect``: pass through to mock ``side_effect``
'''
methods = symbol.split('.')
index = len(methods)
mock = None
while index > 0:
name = 'mocked_' + '_'.join(methods[:index]).lower()
logger.debug('name: %s', name)
if hasattr(context, name):
mock = getattr(context, name)
break
index -= 1
logger.debug('mock: %s', mock)
if mock is not None:
mock = functools.reduce(getattr, methods[index:], mock)
logger.debug('mock: %s', mock)
if return_value is not None:
mock.return_value = return_value
if side_effect is not None:
mock.side_effect = side_effect
mock.reset_mock()
def _find_mocker(symbol: str, context: 'torment.contexts.TestContext') -> Callable[[], bool]:
'''Find method within the context that mocks symbol.
Given a symbol (i.e. ``tornado.httpclient.AsyncHTTPClient.fetch``), find
the shortest ``mock_`` method that resembles the symbol. Resembles means
the lowercased and periods replaced with underscores.
If no match is found, a dummy function (only returns False) is returned.
**Parameters**
:``symbol``: the symbol to be located
:``context``: the search context
**Return Value(s)**
The method used to mock the symbol.
**Examples**
Assuming the symbol is ``tornado.httpclient.AsyncHTTPClient.fetch``, the
first of the following methods would be returned:
* ``mock_tornado``
* ``mock_tornado_httpclient``
* ``mock_tornado_httpclient_asynchttpclient``
* ``mock_tornado_httpclient_asynchttpclient_fetch``
'''
components = []
method = None
for component in symbol.split('.'):
components.append(component.lower())
name = '_'.join([ 'mock' ] + components)
if hasattr(context, name):
method = getattr(context, name)
break
if method is None:
logger.warn('no mocker for %s', symbol)
def noop(*args, **kwargs):
return False
method = noop
return method
def _resolve_functions(functions: Dict[str, Callable[[Any], Any]], fixture: Fixture) -> None:
'''Apply functions and collect values as properties on fixture.
Call functions and apply their values as properteis on fixture.
Functions will continue to get applied until no more functions resolve.
All unresolved functions are logged and the last exception to have
occurred is also logged. This function does not return but adds the
results to fixture directly.
**Parameters**
:``functions``: dict mapping function names (property names) to
callable functions
:``fixture``: Fixture to add values to
'''
exc_info = last_function = None
function_count = float('inf')
while function_count > len(functions):
function_count = len(functions)
for name, function in copy.copy(functions).items():
try:
setattr(fixture, name, copy.deepcopy(function(fixture)))
del functions[name]
except:
exc_info = sys.exc_info()
logger.debug('name: %s', name)
logger.debug('exc_info: %s', exc_info)
last_function = name
if len(functions):
logger.warning('unprocessed Fixture properties: %s', ','.join(functions.keys()))
logger.warning('last exception from %s.%s:', fixture.name, last_function, exc_info = exc_info)
setattr(fixture, '_last_resolver_exception', ( last_function, exc_info, ))
for name, function in copy.copy(functions).items():
setattr(fixture, name, function)
def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
'''Generate unique to namespace name for a class using uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid.
'''
count = 0
name = original_name = 'f_' + uuid.hex
while name in namespace:
count += 1
name = original_name + '_' + str(count)
return name
|
racker/torment
|
torment/fixtures/__init__.py
|
_prepare_mock
|
python
|
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
'''Sets return value or side effect of symbol's mock in context.
.. seealso:: :py:func:`_find_mocker`
**Parameters**
:``context``: the search context
:``symbol``: the symbol to be located
:``return_value``: pass through to mock ``return_value``
:``side_effect``: pass through to mock ``side_effect``
'''
methods = symbol.split('.')
index = len(methods)
mock = None
while index > 0:
name = 'mocked_' + '_'.join(methods[:index]).lower()
logger.debug('name: %s', name)
if hasattr(context, name):
mock = getattr(context, name)
break
index -= 1
logger.debug('mock: %s', mock)
if mock is not None:
mock = functools.reduce(getattr, methods[index:], mock)
logger.debug('mock: %s', mock)
if return_value is not None:
mock.return_value = return_value
if side_effect is not None:
mock.side_effect = side_effect
mock.reset_mock()
|
Sets return value or side effect of symbol's mock in context.
.. seealso:: :py:func:`_find_mocker`
**Parameters**
:``context``: the search context
:``symbol``: the symbol to be located
:``return_value``: pass through to mock ``return_value``
:``side_effect``: pass through to mock ``side_effect``
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L441-L482
| null |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
import logging
import os
import sys
import typing # noqa (use mypy typing)
import uuid
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Tuple
from typing import Union
from torment import decorators
logger = logging.getLogger(__name__)
class Fixture(object):
    '''Collection of data and actions for a particular test case.

    Intended as a base class for custom fixtures, Fixture provides an API
    that simplifies writing scalable test cases.  Object creation is split
    in two: a subclass supplies behavior through the action hooks below,
    while ``torment.fixtures.register`` binds a dictionary of data to it
    (keys of the dictionary become Fixture properties) and produces a
    subclass the rest of torment can turn into nose-compatible test cases.

    Action hooks (all default to no-ops; ``run`` is required):

    :``__init__``:   pre-data population initialization
    :``initialize``: post-data population initialization
    :``setup``:      pre-run setup
    :``run``:        REQUIRED—run code under test
    :``check``:      verify results of run

    .. note::
        ``initialize`` runs during ``__init__`` (after properties have been
        populated) whereas ``setup`` runs just before ``run``; otherwise
        they serve the same function.  The split allows different actions
        to occur in different areas of the class hierarchy.

    **Properties**

    * ``category``
    * ``description`` (override)
    * ``name`` (do **not** override)

    **Methods To Override**

    * ``__init__``
    * ``check``
    * ``initialize``
    * ``run (required)``
    * ``setup``

    **Instance Variables**

    :``context``: the ``torment.TestContext`` this case is running in which
                  provides the assertion methods of ``unittest.TestCase``.
    '''

    def __init__(self, context: 'torment.TestContext') -> None:
        '''Create Fixture bound to the given context.

        **Parameters**

        :``context``: a subclass of ``torment.TestContext`` that provides
                      assertion methods and any other environmental
                      information for this test case
        '''

        self.context = context

    @property
    def category(self) -> str:
        '''Fixture's category (the containing testing module name).

        **Examples**

        :module:   test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
        :category: fixtures
        '''

        logger.debug('dir(self.__module__): %s', dir(self.__module__))

        # NOTE(review): assumes self.__module__ is a module object (as
        # assigned by register()), not the usual string — confirm before
        # relying on this for hand-written subclasses.
        return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')

    @property
    def description(self) -> str:
        '''Test name in nose output (intended to be overridden).'''

        return '{0.uuid.hex}—{1}'.format(self, self.context.module)

    @property
    def name(self) -> str:
        '''Method name in nose runtime.'''

        return 'test_' + self.__class__.__name__

    def initialize(self) -> None:
        '''Post-data population initialization hook.

        No-op by default; override as necessary.  Called during
        ``__init__`` after properties have been populated by
        ``torment.fixtures.register``.
        '''

        pass

    def setup(self) -> None:
        '''Pre-run initialization hook.

        No-op by default; override as necessary.  Called just before
        ``run``.
        '''

        pass

    def check(self) -> None:
        '''Verify that ``run`` performed the expected actions.

        No-op by default; override as necessary.  Called after ``run``.
        '''

        pass

    def _execute(self) -> None:
        '''Run Fixture actions in order: setup, run, check.'''

        # Surface any property-resolution failure recorded by
        # _resolve_functions before the case actually executes.
        if hasattr(self, '_last_resolver_exception'):
            logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])

        self.setup()
        self.run()
        self.check()
class ErrorFixture(Fixture):
    '''Common error-checking mixin for Fixture.

    Mix into a registered Fixture (via register) whose case is expected to
    raise an exception.  ``run`` asserts that the sibling ``run`` raises
    ``self.error``'s class and stores the caught exception on
    ``self.exception``.

    **Examples**

    As a new Fixture class:

    .. code-block:: python

        class ErrorAddFixture(ErrorFixture, AddFixture):
            pass

    Via a call to register:

    .. code-block:: python

        register(globals(), ( ErrorFixture, AddFixture, ), { … })
    '''

    @property
    def description(self) -> str:
        '''Test name in nose output (adds error reason as result portion).'''

        return super().description + ' → {0.error}'.format(self)

    def run(self) -> None:
        '''Call the sibling ``run``, expecting it to raise ``self.error``.'''

        with self.context.assertRaises(self.error.__class__) as error:
            super().run()

        self.exception = error.exception
@decorators.log
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
    '''Instantiate every leaf Fixture class descending from the given classes.

    Walks the subclass tree of each provided class and instantiates the
    leaves (classes with no subclasses of their own), skipping any class
    that was itself passed in.

    **Parameters**

    :``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
    :``context``:         a ``torment.TestContext`` to initialize Fixtures with

    **Return Value(s)**

    Instantiated ``torment.fixtures.Fixture`` objects for each individual
    fixture class that inherits from one of the provided classes.
    '''

    pending = list(copy.copy(fixture_classes))
    fixtures = []  # type: Iterable[torment.fixtures.Fixture]

    while pending:
        candidate = pending.pop()

        children = candidate.__subclasses__()

        if children:
            pending.extend(children)
        elif candidate not in fixture_classes:
            fixtures.append(candidate(context))

    return fixtures
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
    '''Register a Fixture class in namespace with the given properties.

    Creates a Fixture class (not object) and inserts it into the provided
    namespace.  ``properties`` is a dict, but values may be functions that
    reference other properties, so it acts like a small declarative DSL
    for composing repeatable test-fixture data.

    Files calling this function are expected to house one or more Fixtures
    and have a name that ends with a UUID without its hyphens (e.g.
    ``foo_38de9ceec5694c96ace90c9ca37e5bcb.py``).  That UUID uniquely
    tracks the Fixture through the test suite and allows Fixtures to scale
    without concern.

    **Parameters**

    :``namespace``:    dictionary to insert the generated class into
    :``base_classes``: classes the new class should inherit
    :``properties``:   dictionary of properties with their values

    Property values may take the following forms:

    :functions: invoked with the Fixture as their argument; retried until
                they resolve and logged (but left intact) if they never do
    :classes:   instantiated without arguments (Fixture subclasses are
                passed context instead)
    :literals:  any standard python type (i.e. int, str, dict)

    Properties with the following names also have defined behavior:

    :description: added to the Fixture's description as an addendum
    :error:       dict with keys ``class`` (usually an exception type) plus
                  optional ``args``/``kwargs`` for its instantiation
    :mocks:       dictionary mapping mock symbols to corresponding values

    The property name ``name`` is reserved and should not be used.
    '''

    # Deep copy so callers can safely re-use their properties dict
    # elsewhere in their code.
    props = copy.deepcopy(properties)

    desc = props.pop('description', None)  # type: Union[str, None]

    # The registering module's file name carries the fixture's UUID.
    calling_frame = inspect.stack()[1]
    calling_file = calling_frame[1]
    calling_module = inspect.getmodule(calling_frame[0])

    my_uuid = uuid.UUID(os.path.basename(calling_file).replace('.py', '').rsplit('_', 1)[-1])

    class_name = _unique_class_name(namespace, my_uuid)

    @property
    def description(self) -> str:
        # Append the registered addendum (if any) to the base description.
        _ = super(self.__class__, self).description

        if desc is not None:
            _ += '—' + desc

        return _

    def __init__(self, context: 'torment.TestContext') -> None:
        super(self.__class__, self).__init__(context)

        unresolved = {}

        for name, value in props.items():
            if name == 'error':
                # Instantiate the expected exception from its spec.
                self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
                continue

            if inspect.isclass(value):
                value = value(self.context) if issubclass(value, Fixture) else value()

            if inspect.isfunction(value):
                # Defer: functions may depend on other properties.
                unresolved[name] = value
                continue

            setattr(self, name, value)

        _resolve_functions(unresolved, self)

        self.initialize()

    def setup(self) -> None:
        if hasattr(self, 'mocks'):
            logger.debug('self.mocks: %s', self.mocks)

            for mock_symbol, mock_result in self.mocks.items():
                # Only prepare the mock if the context actually mocked it.
                if _find_mocker(mock_symbol, self.context)():
                    _prepare_mock(self.context, mock_symbol, **mock_result)

        super(self.__class__, self).setup()

    namespace[class_name] = type(class_name, base_classes, {
        'description': description,
        '__init__': __init__,
        '__module__': calling_module,
        'setup': setup,
        'uuid': my_uuid,
    })
def _find_mocker(symbol: str, context: 'torment.contexts.TestContext') -> Callable[[], bool]:
'''Find method within the context that mocks symbol.
Given a symbol (i.e. ``tornado.httpclient.AsyncHTTPClient.fetch``), find
the shortest ``mock_`` method that resembles the symbol. Resembles means
the lowercased and periods replaced with underscores.
If no match is found, a dummy function (only returns False) is returned.
**Parameters**
:``symbol``: the symbol to be located
:``context``: the search context
**Return Value(s)**
The method used to mock the symbol.
**Examples**
Assuming the symbol is ``tornado.httpclient.AsyncHTTPClient.fetch``, the
first of the following methods would be returned:
* ``mock_tornado``
* ``mock_tornado_httpclient``
* ``mock_tornado_httpclient_asynchttpclient``
* ``mock_tornado_httpclient_asynchttpclient_fetch``
'''
components = []
method = None
for component in symbol.split('.'):
components.append(component.lower())
name = '_'.join([ 'mock' ] + components)
if hasattr(context, name):
method = getattr(context, name)
break
if method is None:
logger.warn('no mocker for %s', symbol)
def noop(*args, **kwargs):
return False
method = noop
return method
def _resolve_functions(functions: Dict[str, Callable[[Any], Any]], fixture: Fixture) -> None:
'''Apply functions and collect values as properties on fixture.
Call functions and apply their values as properteis on fixture.
Functions will continue to get applied until no more functions resolve.
All unresolved functions are logged and the last exception to have
occurred is also logged. This function does not return but adds the
results to fixture directly.
**Parameters**
:``functions``: dict mapping function names (property names) to
callable functions
:``fixture``: Fixture to add values to
'''
exc_info = last_function = None
function_count = float('inf')
while function_count > len(functions):
function_count = len(functions)
for name, function in copy.copy(functions).items():
try:
setattr(fixture, name, copy.deepcopy(function(fixture)))
del functions[name]
except:
exc_info = sys.exc_info()
logger.debug('name: %s', name)
logger.debug('exc_info: %s', exc_info)
last_function = name
if len(functions):
logger.warning('unprocessed Fixture properties: %s', ','.join(functions.keys()))
logger.warning('last exception from %s.%s:', fixture.name, last_function, exc_info = exc_info)
setattr(fixture, '_last_resolver_exception', ( last_function, exc_info, ))
for name, function in copy.copy(functions).items():
setattr(fixture, name, function)
def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
'''Generate unique to namespace name for a class using uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid.
'''
count = 0
name = original_name = 'f_' + uuid.hex
while name in namespace:
count += 1
name = original_name + '_' + str(count)
return name
|
racker/torment
|
torment/fixtures/__init__.py
|
_find_mocker
|
python
|
def _find_mocker(symbol: str, context: 'torment.contexts.TestContext') -> Callable[[], bool]:
'''Find method within the context that mocks symbol.
Given a symbol (i.e. ``tornado.httpclient.AsyncHTTPClient.fetch``), find
the shortest ``mock_`` method that resembles the symbol. Resembles means
the lowercased and periods replaced with underscores.
If no match is found, a dummy function (only returns False) is returned.
**Parameters**
:``symbol``: the symbol to be located
:``context``: the search context
**Return Value(s)**
The method used to mock the symbol.
**Examples**
Assuming the symbol is ``tornado.httpclient.AsyncHTTPClient.fetch``, the
first of the following methods would be returned:
* ``mock_tornado``
* ``mock_tornado_httpclient``
* ``mock_tornado_httpclient_asynchttpclient``
* ``mock_tornado_httpclient_asynchttpclient_fetch``
'''
components = []
method = None
for component in symbol.split('.'):
components.append(component.lower())
name = '_'.join([ 'mock' ] + components)
if hasattr(context, name):
method = getattr(context, name)
break
if method is None:
logger.warn('no mocker for %s', symbol)
def noop(*args, **kwargs):
return False
method = noop
return method
|
Find method within the context that mocks symbol.
Given a symbol (i.e. ``tornado.httpclient.AsyncHTTPClient.fetch``), find
the shortest ``mock_`` method that resembles the symbol. Resembles means
the lowercased and periods replaced with underscores.
If no match is found, a dummy function (only returns False) is returned.
**Parameters**
:``symbol``: the symbol to be located
:``context``: the search context
**Return Value(s)**
The method used to mock the symbol.
**Examples**
Assuming the symbol is ``tornado.httpclient.AsyncHTTPClient.fetch``, the
first of the following methods would be returned:
* ``mock_tornado``
* ``mock_tornado_httpclient``
* ``mock_tornado_httpclient_asynchttpclient``
* ``mock_tornado_httpclient_asynchttpclient_fetch``
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L485-L534
| null |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
import logging
import os
import sys
import typing # noqa (use mypy typing)
import uuid
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Tuple
from typing import Union
from torment import decorators
logger = logging.getLogger(__name__)
class Fixture(object):
'''Collection of data and actions for a particular test case.
Intended as a base class for custom fixtures. Fixture provides an API
that simplifies writing scalable test cases.
Creating Fixture objects is broken into two parts. This keeps the logic for
a class of test cases separate from the data for particular cases while
allowing re-use of the data provided by a fixture.
The first part of Fixture object creation is crafting a proper subclass that
implements the necessary actions:
:``__init__``: pre-data population initialization
:``initialize``: post-data population initialization
:``setup``: pre-run setup
:``run``: REQUIRED—run code under test
:``check``: verify results of run
.. note::
``initialize`` is run during ``__init__`` and setup is run after;
otherwise, they serve the same function. The split allows different
actions to occur in different areas of the class heirarchy and generally
isn't necessary.
By default all actions are noops and simply do nothing but run is required.
These actions allow complex class hierarchies to provide nuanced testing
behavior. For example, Fixture provides the absolute bare minimum to test
any Fixture and no more. By adding a set of subclasses, common
initialization and checks can be performed at one layer while specific run
decisions and checks can happen at a lower layer.
The second part of Fixture object creation is crafting the data. Tying data
to a Fixture class should be done with ``torment.fixtures.register``. It
provides a declarative interface that binds a dictionary to a Fixture (keys
of dictionary become Fixture properties). ``torment.fixtures.register``
creates a subclass that the rest of the torment knows how to transform into
test cases that are compatible with nose.
**Examples**
Simplest Fixture subclass:
.. code-block:: python
class MyFixture(Fixture):
pass
Of course, to be useful the Fixture needs definitions of setup, run, and
check that actually test the code we're interested in checking:
.. code-block:: python
def add(x, y):
return x + y
class AddFixture(Fixture):
def run(self):
self.result = add(self.parameters['x'], self.parameters['y'])
def check(self):
self.context.assertEqual(self.result, self.expected)
This fixture uses a couple of conventions (not requirements):
#. ``self.parameters`` as a dictionary of parameter names to values
#. ``self.expected`` as the value we expect as a result
#. ``self.result`` as the holder inside the fixture between ``run`` and
``check``
This show-cases the ridiculity of using this testing framework for simple
functions that have few cases that require testing. This framework is
designed to allow many cases to be easily and declaritively defined.
The last component required to get these fixtures to actually run is hooking
them together with a context:
.. code-block:: python
from torment import contexts
class AddUnitTest(contexts.TestContext, metaclass = contexts.MetaContext):
fixture_classes = (
MyFixture,
AddFixture,
)
The context that wraps a Fixture subclass should eventually inherit from
TestContext (which inherits from ``unittest.TestCase`` and provides its assert
methods). In order for nose to find and execute this ``TestContext``, it
must have a name that contains Test.
**Properties**
* ``category``
* ``description`` (override)
* ``name`` (do **not** override)
**Methods To Override**
* ``__init__``
* ``check``
* ``initialize``
* ``run (required)``
* ``setup``
**Instance Variables**
:``context``: the ``torment.TestContext`` this case is running in which
provides the assertion methods of ``unittest.TestCase``.
'''
def __init__(self, context: 'torment.TestContext') -> None:
'''Create Fixture
Initializes the Fixture's context (can be changed like any other
property).
**Parameters**
:``context``: a subclass of ``torment.TestContext`` that provides
assertion methods and any other environmental information
for this test case
'''
self.context = context
@property
def category(self) -> str:
'''Fixture's category (the containing testing module name)
**Examples**
:module: test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
:category: fixtures
'''
logger.debug('dir(self.__module__): %s', dir(self.__module__))
return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')
@property
def description(self) -> str:
'''Test name in nose output (intended to be overridden).'''
return '{0.uuid.hex}—{1}'.format(self, self.context.module)
@property
def name(self) -> str:
'''Method name in nose runtime.'''
return 'test_' + self.__class__.__name__
def initialize(self) -> None:
'''Post-data population initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called during ``__init__`` and after properties have been populated by
``torment.fixtures.register``.
'''
pass
def setup(self) -> None:
'''Pre-run initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after properties have been populated by
``torment.fixtures.register``.
'''
pass
def check(self) -> None:
'''Check that run ran as expected.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after ``run`` and should be used to verify that run performed the
expected actions.
'''
pass
def _execute(self) -> None:
'''Run Fixture actions (setup, run, check).
Core test loop for Fixture. Executes setup, run, and check in order.
'''
if hasattr(self, '_last_resolver_exception'):
logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])
self.setup()
self.run()
self.check()
class ErrorFixture(Fixture):
'''Common error checking for Fixture.
Intended as a mixin when registering a new Fixture (via register) that will
check an error case (one throwing an exception).
**Examples**
Using the AddFixture from the Examples in Fixture, we can create a Fixture
that handles (an obviously contrived) exception by either crafting a new
Fixture object or invoking register with the appropriate base classes.
New Fixture Object:
.. code-block:: python
class ErrorAddFixture(ErrorFixture, AddFixture):
pass
Via call to register:
.. code-block:: python
register(globals(), ( ErrorFixture, AddFixture, ), { … })
'''
@property
def description(self) -> str:
'''Test name in nose output (adds error reason as result portion).'''
return super().description + ' → {0.error}'.format(self)
def run(self) -> None:
'''Calls sibling with exception expectation.'''
with self.context.assertRaises(self.error.__class__) as error:
super().run()
self.exception = error.exception
@decorators.log
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
'''Obtain all Fixture objects of the provided classes.
**Parameters**
:``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
:``context``: a ``torment.TestContext`` to initialize Fixtures with
**Return Value(s)**
Instantiated ``torment.fixtures.Fixture`` objects for each individual
fixture class that inherits from one of the provided classes.
'''
classes = list(copy.copy(fixture_classes))
fixtures = [] # type: Iterable[torment.fixtures.Fixture]
while len(classes):
current = classes.pop()
subclasses = current.__subclasses__()
if len(subclasses):
classes.extend(subclasses)
elif current not in fixture_classes:
fixtures.append(current(context))
return fixtures
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
'''Register a Fixture class in namespace with the given properties.
Creates a Fixture class (not object) and inserts it into the provided
namespace. The properties is a dict but allows functions to reference other
properties and acts like a small DSL (domain specific language). This is
really just a declarative way to compose data about a test fixture and make
it repeatable.
Files calling this function are expected to house one or more Fixtures and
have a name that ends with a UUID without its hyphens. For example:
foo_38de9ceec5694c96ace90c9ca37e5bcb.py. This UUID is used to uniquely
track the Fixture through the test suite and allow Fixtures to scale without
concern.
**Parameters**
:``namespace``: dictionary to insert the generated class into
:``base_classes``: list of classes the new class should inherit
:``properties``: dictionary of properties with their values
Properties can have the following forms:
:functions: invoked with the Fixture as it's argument
:classes: instantiated without any arguments (unless it subclasses
``torment.fixtures.Fixture`` in which case it's passed context)
:literals: any standard python type (i.e. int, str, dict)
.. note::
function execution may error (this will be emitted as a logging event).
functions will continually be tried until they resolve or the same set
of functions is continually erroring. These functions that failed to
resolve are left in tact for later processing.
Properties by the following names also have defined behavior:
:description: added to the Fixture's description as an addendum
:error: must be a dictionary with three keys:
:class: class to instantiate (usually an exception)
:args: arguments to pass to class initialization
:kwargs: keyword arguments to pass to class initialization
:mocks: dictionary mapping mock symbols to corresponding values
Properties by the following names are reserved and should not be used:
* name
'''
# ensure we have a clean copy of the data
# and won't stomp on re-uses elsewhere in
# someone's code
props = copy.deepcopy(properties)
desc = props.pop('description', None) # type: Union[str, None]
caller_frame = inspect.stack()[1]
caller_file = caller_frame[1]
caller_module = inspect.getmodule(caller_frame[0])
my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])
class_name = _unique_class_name(namespace, my_uuid)
@property
def description(self) -> str:
_ = super(self.__class__, self).description
if desc is not None:
_ += '—' + desc
return _
def __init__(self, context: 'torment.TestContext') -> None:
super(self.__class__, self).__init__(context)
functions = {}
for name, value in props.items():
if name == 'error':
self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
continue
if inspect.isclass(value):
if issubclass(value, Fixture):
value = value(self.context)
else:
value = value()
if inspect.isfunction(value):
functions[name] = value
continue
setattr(self, name, value)
_resolve_functions(functions, self)
self.initialize()
def setup(self) -> None:
if hasattr(self, 'mocks'):
logger.debug('self.mocks: %s', self.mocks)
for mock_symbol, mock_result in self.mocks.items():
if _find_mocker(mock_symbol, self.context)():
_prepare_mock(self.context, mock_symbol, **mock_result)
super(self.__class__, self).setup()
namespace[class_name] = type(class_name, base_classes, {
'description': description,
'__init__': __init__,
'__module__': caller_module,
'setup': setup,
'uuid': my_uuid,
})
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
    '''Set the return value or side effect of symbol's mock in context.

    Searches context for the longest ``mocked_`` attribute matching a
    prefix of symbol, walks the remaining attribute path on that mock
    object, applies ``return_value``/``side_effect``, and resets the mock.

    .. seealso:: :py:func:`_find_mocker`

    **Parameters**

    :``context``:      the search context
    :``symbol``:       the symbol to be located
    :``return_value``: pass through to mock ``return_value``
    :``side_effect``:  pass through to mock ``side_effect``
    '''

    parts = symbol.split('.')

    mock = None

    # Longest matching prefix wins: try 'mocked_a_b_c', then 'mocked_a_b', …
    length = len(parts)
    while length > 0:
        attribute = 'mocked_' + '_'.join(parts[:length]).lower()

        logger.debug('name: %s', attribute)

        if hasattr(context, attribute):
            mock = getattr(context, attribute)
            break

        length -= 1

    logger.debug('mock: %s', mock)

    if mock is not None:
        # Descend through any remaining (unmatched) path components.
        mock = functools.reduce(getattr, parts[length:], mock)

        logger.debug('mock: %s', mock)

        if return_value is not None:
            mock.return_value = return_value

        if side_effect is not None:
            mock.side_effect = side_effect

        mock.reset_mock()
def _resolve_functions(functions: Dict[str, Callable[[Any], Any]], fixture: Fixture) -> None:
    '''Apply functions and collect their values as properties on fixture.

    Calls each function with ``fixture`` and assigns a deep copy of the
    result as a property of the same name.  Passes repeat until a full sweep
    resolves nothing further, so functions may depend on properties produced
    by other functions.  Unresolved functions are logged (with the last
    exception raised) and left on ``fixture`` as the functions themselves for
    later processing.  Returns nothing; results are attached to ``fixture``
    directly.

    **Parameters**

    :``functions``: dict mapping function names (property names) to
                    callable functions
    :``fixture``:   Fixture to add values to
    '''

    exc_info = last_function = None

    function_count = float('inf')

    # Fixed-point iteration: stop once a full sweep shrinks nothing.
    while function_count > len(functions):
        function_count = len(functions)

        for name, function in copy.copy(functions).items():
            try:
                setattr(fixture, name, copy.deepcopy(function(fixture)))
                del functions[name]
            except Exception:  # was a bare ``except:``, which also traps SystemExit/KeyboardInterrupt
                exc_info = sys.exc_info()

                logger.debug('name: %s', name)
                logger.debug('exc_info: %s', exc_info)

                last_function = name

    if len(functions):
        logger.warning('unprocessed Fixture properties: %s', ','.join(functions.keys()))
        logger.warning('last exception from %s.%s:', fixture.name, last_function, exc_info = exc_info)

        setattr(fixture, '_last_resolver_exception', ( last_function, exc_info, ))

        for name, function in copy.copy(functions).items():
            setattr(fixture, name, function)
def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
    '''Generate a class name, unique within ``namespace``, derived from ``uuid``.

    **Parameters**

    :``namespace``: the namespace to verify uniqueness against
    :``uuid``:      the "unique" portion of the name

    **Return Value(s)**

    A unique string (in namespace) using uuid.
    '''

    base = 'f_' + uuid.hex

    if base not in namespace:
        return base

    # Collision: append the smallest positive integer suffix still free.
    suffix = 1
    while base + '_' + str(suffix) in namespace:
        suffix += 1

    return base + '_' + str(suffix)
|
racker/torment
|
torment/fixtures/__init__.py
|
_resolve_functions
|
python
|
def _resolve_functions(functions: Dict[str, Callable[[Any], Any]], fixture: Fixture) -> None:
    '''Apply functions and collect their values as properties on fixture.

    Calls each function with ``fixture`` and assigns a deep copy of the
    result as a property of the same name.  Passes repeat until a full sweep
    resolves nothing further, so functions may depend on properties produced
    by other functions.  Unresolved functions are logged (with the last
    exception raised) and left on ``fixture`` as the functions themselves for
    later processing.  Returns nothing; results are attached to ``fixture``
    directly.

    **Parameters**

    :``functions``: dict mapping function names (property names) to
                    callable functions
    :``fixture``:   Fixture to add values to
    '''

    exc_info = last_function = None

    function_count = float('inf')

    # Fixed-point iteration: stop once a full sweep shrinks nothing.
    while function_count > len(functions):
        function_count = len(functions)

        for name, function in copy.copy(functions).items():
            try:
                setattr(fixture, name, copy.deepcopy(function(fixture)))
                del functions[name]
            except Exception:  # was a bare ``except:``, which also traps SystemExit/KeyboardInterrupt
                exc_info = sys.exc_info()

                logger.debug('name: %s', name)
                logger.debug('exc_info: %s', exc_info)

                last_function = name

    if len(functions):
        logger.warning('unprocessed Fixture properties: %s', ','.join(functions.keys()))
        logger.warning('last exception from %s.%s:', fixture.name, last_function, exc_info = exc_info)

        setattr(fixture, '_last_resolver_exception', ( last_function, exc_info, ))

        for name, function in copy.copy(functions).items():
            setattr(fixture, name, function)
|
Apply functions and collect values as properties on fixture.
Call functions and apply their values as properties on fixture.
Functions will continue to get applied until no more functions resolve.
All unresolved functions are logged and the last exception to have
occurred is also logged. This function does not return but adds the
results to fixture directly.
**Parameters**
:``functions``: dict mapping function names (property names) to
callable functions
:``fixture``: Fixture to add values to
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L537-L579
| null |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
import logging
import os
import sys
import typing # noqa (use mypy typing)
import uuid
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Tuple
from typing import Union
from torment import decorators
logger = logging.getLogger(__name__)
class Fixture(object):
    '''Bundle of data and behavior backing a single test case.

    Intended as a base class for custom fixtures.  Subclasses override the
    lifecycle hooks below; ``run`` is the only one that must be provided:

    :``__init__``:   pre-data-population initialization
    :``initialize``: post-data-population initialization (end of ``__init__``)
    :``setup``:      pre-run setup
    :``run``:        REQUIRED—exercise the code under test
    :``check``:      verify the results of ``run``

    Data is bound declaratively via ``torment.fixtures.register``, which
    turns a property dictionary into a concrete subclass (keys become
    properties) that the framework can hand to nose.  The containing file's
    name must end with a hyphen-less UUID so each fixture is uniquely
    trackable through the suite.

    **Properties**

    * ``category``
    * ``description`` (override)
    * ``name`` (do **not** override)

    **Instance Variables**

    :``context``: the ``torment.TestContext`` this case runs in; supplies
                  the assertion methods of ``unittest.TestCase``.
    '''

    def __init__(self, context: 'torment.TestContext') -> None:
        '''Bind the fixture to its running context.

        **Parameters**

        :``context``: a subclass of ``torment.TestContext`` providing
                      assertion methods and environmental information for
                      this test case
        '''

        self.context = context

    @property
    def category(self) -> str:
        '''Fixture's category—the name of the containing testing module.

        **Examples**

        :module:   test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
        :category: fixtures
        '''

        logger.debug('dir(self.__module__): %s', dir(self.__module__))

        return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')

    @property
    def description(self) -> str:
        '''Test name shown in nose output (intended to be overridden).'''

        return '{0.uuid.hex}—{1}'.format(self, self.context.module)

    @property
    def name(self) -> str:
        '''Method name used by nose at runtime.'''

        return 'test_' + self.__class__.__name__

    def initialize(self) -> None:
        '''Hook: runs at the end of ``__init__`` once properties are populated.

        .. note:: Override as necessary; the default is a no-op.
        '''

        pass

    def setup(self) -> None:
        '''Hook: runs immediately before ``run``.

        .. note:: Override as necessary; the default is a no-op.
        '''

        pass

    def check(self) -> None:
        '''Hook: runs after ``run`` to verify its effects.

        .. note:: Override as necessary; the default is a no-op.
        '''

        pass

    def _execute(self) -> None:
        '''Drive the fixture's core test loop: setup → run → check.'''

        # Surface any property-resolution failure recorded by register().
        if hasattr(self, '_last_resolver_exception'):
            logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])

        self.setup()
        self.run()
        self.check()
class ErrorFixture(Fixture):
    '''Mixin adding expected-exception handling to a Fixture.

    Mix in ahead of the functional fixture class—directly or via
    ``register``—when a case is expected to raise: the sibling ``run`` is
    executed under ``assertRaises`` for ``self.error``'s class and the
    caught exception is stored on ``self.exception``.

    **Examples**

    .. code-block:: python

        class ErrorAddFixture(ErrorFixture, AddFixture):
            pass

        # or equivalently:
        register(globals(), ( ErrorFixture, AddFixture, ), { … })
    '''

    @property
    def description(self) -> str:
        '''Test name in nose output, suffixed with the expected error.'''

        return super().description + ' → {0.error}'.format(self)

    def run(self) -> None:
        '''Invoke the sibling ``run`` expecting ``self.error``'s class.'''

        with self.context.assertRaises(self.error.__class__) as error:
            super().run()

        self.exception = error.exception
@decorators.log
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
    '''Instantiate every leaf Fixture class under the provided classes.

    **Parameters**

    :``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
    :``context``:         a ``torment.TestContext`` to initialize Fixtures with

    **Return Value(s)**

    Instantiated ``torment.fixtures.Fixture`` objects for each individual
    fixture class that inherits from one of the provided classes.
    '''

    pending = list(copy.copy(fixture_classes))
    fixtures = []  # type: Iterable[torment.fixtures.Fixture]

    # Depth-first walk of the subclass tree; only leaves are instantiated.
    while len(pending):
        klass = pending.pop()

        children = klass.__subclasses__()

        if len(children):
            pending.extend(children)
        elif klass not in fixture_classes:
            fixtures.append(klass(context))

    return fixtures
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
    '''Register a Fixture class in namespace with the given properties.

    Creates a Fixture class (not object) from ``properties`` and inserts it
    into ``namespace``.  Property values may be literals, classes (which are
    instantiated—Fixture subclasses receive the context), or functions
    (called with the fixture and resolved via ``_resolve_functions``;
    unresolved functions are logged and left in place).

    The calling file's name must end in a hyphen-less UUID, e.g.
    ``foo_38de9ceec5694c96ace90c9ca37e5bcb.py``; that UUID uniquely tracks
    the Fixture through the test suite.

    **Parameters**

    :``namespace``:    dictionary to insert the generated class into
    :``base_classes``: list of classes the new class should inherit
    :``properties``:   dictionary of properties with their values

    Properties with defined behavior:

    :``description``: appended to the Fixture's description
    :``error``:       dict with ``class``, ``args``, and ``kwargs`` keys used
                      to instantiate the expected error
    :``mocks``:       dictionary mapping mock symbols to corresponding values

    The property name ``name`` is reserved and must not be used.
    '''

    # Deep copy so resolution cannot stomp on re-uses of the caller's dict.
    props = copy.deepcopy(properties)

    desc = props.pop('description', None)  # type: Union[str, None]

    caller_frame = inspect.stack()[1]
    caller_file = caller_frame[1]
    caller_module = inspect.getmodule(caller_frame[0])

    # The caller's filename carries the fixture's UUID after the last '_'.
    my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])

    class_name = _unique_class_name(namespace, my_uuid)

    # NOTE: the methods below use super(generated_class, self) rather than
    # super(self.__class__, self); the latter recurses infinitely as soon as
    # the generated class is itself subclassed.

    @property
    def description(self) -> str:
        _ = super(generated_class, self).description

        if desc is not None:
            _ += '—' + desc

        return _

    def __init__(self, context: 'torment.TestContext') -> None:
        super(generated_class, self).__init__(context)

        functions = {}

        for name, value in props.items():
            if name == 'error':
                self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
                continue

            if inspect.isclass(value):
                if issubclass(value, Fixture):
                    value = value(self.context)
                else:
                    value = value()

            if inspect.isfunction(value):
                functions[name] = value
                continue

            setattr(self, name, value)

        _resolve_functions(functions, self)

        self.initialize()

    def setup(self) -> None:
        if hasattr(self, 'mocks'):
            logger.debug('self.mocks: %s', self.mocks)

            for mock_symbol, mock_result in self.mocks.items():
                if _find_mocker(mock_symbol, self.context)():
                    _prepare_mock(self.context, mock_symbol, **mock_result)

        super(generated_class, self).setup()

    # Bind the generated class so the closures above can name it safely.
    generated_class = namespace[class_name] = type(class_name, base_classes, {
        'description': description,
        '__init__':    __init__,
        '__module__':  caller_module,
        'setup':       setup,
        'uuid':        my_uuid,
    })
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
    '''Configure the mock standing in for ``symbol`` on ``context``.

    Walks the dotted ``symbol`` from its longest prefix down to its first
    component looking for a ``mocked_``-prefixed attribute on ``context``.
    Any remaining components are resolved as attributes of that mock before
    ``return_value``/``side_effect`` are applied and the mock is reset.

    .. seealso:: :py:func:`_find_mocker`

    **Parameters**

    :``context``:      the search context
    :``symbol``:       the symbol to be located
    :``return_value``: pass through to mock ``return_value``
    :``side_effect``:  pass through to mock ``side_effect``
    '''

    parts = symbol.split('.')

    target = None
    depth = len(parts)

    # Longest matching prefix wins—try 'mocked_a_b_c' before 'mocked_a'.
    while depth > 0:
        name = 'mocked_' + '_'.join(parts[:depth]).lower()
        logger.debug('name: %s', name)

        if hasattr(context, name):
            target = getattr(context, name)
            break

        depth -= 1

    logger.debug('mock: %s', target)

    if target is not None:
        # Resolve whatever trailed the matched prefix (e.g. '.fetch').
        target = functools.reduce(getattr, parts[depth:], target)
        logger.debug('mock: %s', target)

        if return_value is not None:
            target.return_value = return_value

        if side_effect is not None:
            target.side_effect = side_effect

        target.reset_mock()
def _find_mocker(symbol: str, context: 'torment.contexts.TestContext') -> Callable[[], bool]:
    '''Find the shortest ``mock_`` method on context that covers symbol.

    Candidate names are the symbol's dotted prefixes, lowercased, with
    periods replaced by underscores and prefixed with ``mock_``; the first
    (shortest) match wins.  If nothing matches, a dummy function that only
    returns False is returned so callers can skip mocking.

    **Parameters**

    :``symbol``:  the symbol to be located
    :``context``: the search context

    **Return Value(s)**

    The method used to mock the symbol.

    **Examples**

    For ``tornado.httpclient.AsyncHTTPClient.fetch`` the first of the
    following that exists is returned:

    * ``mock_tornado``
    * ``mock_tornado_httpclient``
    * ``mock_tornado_httpclient_asynchttpclient``
    * ``mock_tornado_httpclient_asynchttpclient_fetch``
    '''

    components = []
    method = None

    for component in symbol.split('.'):
        components.append(component.lower())

        name = '_'.join([ 'mock' ] + components)

        if hasattr(context, name):
            method = getattr(context, name)
            break

    if method is None:
        # Logger.warn is a deprecated alias for Logger.warning.
        logger.warning('no mocker for %s', symbol)

        def noop(*args, **kwargs):
            return False

        method = noop

    return method
def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
    '''Generate a class name, unique within ``namespace``, derived from ``uuid``.

    **Parameters**

    :``namespace``: the namespace to verify uniqueness against
    :``uuid``:      the "unique" portion of the name

    **Return Value(s)**

    A unique string (in namespace) using uuid.
    '''

    base = 'f_' + uuid.hex

    if base not in namespace:
        return base

    # Collision: append the smallest positive integer suffix still free.
    suffix = 1
    while base + '_' + str(suffix) in namespace:
        suffix += 1

    return base + '_' + str(suffix)
|
racker/torment
|
torment/fixtures/__init__.py
|
_unique_class_name
|
python
|
def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
    '''Generate a class name, unique within ``namespace``, derived from ``uuid``.

    **Parameters**

    :``namespace``: the namespace to verify uniqueness against
    :``uuid``:      the "unique" portion of the name

    **Return Value(s)**

    A unique string (in namespace) using uuid.
    '''

    base = 'f_' + uuid.hex

    if base not in namespace:
        return base

    # Collision: append the smallest positive integer suffix still free.
    suffix = 1
    while base + '_' + str(suffix) in namespace:
        suffix += 1

    return base + '_' + str(suffix)
|
Generate unique to namespace name for a class using uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L582-L603
| null |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import inspect
import logging
import os
import sys
import typing # noqa (use mypy typing)
import uuid
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Tuple
from typing import Union
from torment import decorators
logger = logging.getLogger(__name__)
class Fixture(object):
    '''Bundle of data and behavior backing a single test case.

    Intended as a base class for custom fixtures.  Subclasses override the
    lifecycle hooks below; ``run`` is the only one that must be provided:

    :``__init__``:   pre-data-population initialization
    :``initialize``: post-data-population initialization (end of ``__init__``)
    :``setup``:      pre-run setup
    :``run``:        REQUIRED—exercise the code under test
    :``check``:      verify the results of ``run``

    Data is bound declaratively via ``torment.fixtures.register``, which
    turns a property dictionary into a concrete subclass (keys become
    properties) that the framework can hand to nose.  The containing file's
    name must end with a hyphen-less UUID so each fixture is uniquely
    trackable through the suite.

    **Properties**

    * ``category``
    * ``description`` (override)
    * ``name`` (do **not** override)

    **Instance Variables**

    :``context``: the ``torment.TestContext`` this case runs in; supplies
                  the assertion methods of ``unittest.TestCase``.
    '''

    def __init__(self, context: 'torment.TestContext') -> None:
        '''Bind the fixture to its running context.

        **Parameters**

        :``context``: a subclass of ``torment.TestContext`` providing
                      assertion methods and environmental information for
                      this test case
        '''

        self.context = context

    @property
    def category(self) -> str:
        '''Fixture's category—the name of the containing testing module.

        **Examples**

        :module:   test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
        :category: fixtures
        '''

        logger.debug('dir(self.__module__): %s', dir(self.__module__))

        return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')

    @property
    def description(self) -> str:
        '''Test name shown in nose output (intended to be overridden).'''

        return '{0.uuid.hex}—{1}'.format(self, self.context.module)

    @property
    def name(self) -> str:
        '''Method name used by nose at runtime.'''

        return 'test_' + self.__class__.__name__

    def initialize(self) -> None:
        '''Hook: runs at the end of ``__init__`` once properties are populated.

        .. note:: Override as necessary; the default is a no-op.
        '''

        pass

    def setup(self) -> None:
        '''Hook: runs immediately before ``run``.

        .. note:: Override as necessary; the default is a no-op.
        '''

        pass

    def check(self) -> None:
        '''Hook: runs after ``run`` to verify its effects.

        .. note:: Override as necessary; the default is a no-op.
        '''

        pass

    def _execute(self) -> None:
        '''Drive the fixture's core test loop: setup → run → check.'''

        # Surface any property-resolution failure recorded by register().
        if hasattr(self, '_last_resolver_exception'):
            logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])

        self.setup()
        self.run()
        self.check()
class ErrorFixture(Fixture):
    '''Mixin adding expected-exception handling to a Fixture.

    Mix in ahead of the functional fixture class—directly or via
    ``register``—when a case is expected to raise: the sibling ``run`` is
    executed under ``assertRaises`` for ``self.error``'s class and the
    caught exception is stored on ``self.exception``.

    **Examples**

    .. code-block:: python

        class ErrorAddFixture(ErrorFixture, AddFixture):
            pass

        # or equivalently:
        register(globals(), ( ErrorFixture, AddFixture, ), { … })
    '''

    @property
    def description(self) -> str:
        '''Test name in nose output, suffixed with the expected error.'''

        return super().description + ' → {0.error}'.format(self)

    def run(self) -> None:
        '''Invoke the sibling ``run`` expecting ``self.error``'s class.'''

        with self.context.assertRaises(self.error.__class__) as error:
            super().run()

        self.exception = error.exception
@decorators.log
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
    '''Instantiate every leaf Fixture class under the provided classes.

    **Parameters**

    :``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
    :``context``:         a ``torment.TestContext`` to initialize Fixtures with

    **Return Value(s)**

    Instantiated ``torment.fixtures.Fixture`` objects for each individual
    fixture class that inherits from one of the provided classes.
    '''

    pending = list(copy.copy(fixture_classes))
    fixtures = []  # type: Iterable[torment.fixtures.Fixture]

    # Depth-first walk of the subclass tree; only leaves are instantiated.
    while len(pending):
        klass = pending.pop()

        children = klass.__subclasses__()

        if len(children):
            pending.extend(children)
        elif klass not in fixture_classes:
            fixtures.append(klass(context))

    return fixtures
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
    '''Register a Fixture class in namespace with the given properties.

    Creates a Fixture class (not object) from ``properties`` and inserts it
    into ``namespace``.  Property values may be literals, classes (which are
    instantiated—Fixture subclasses receive the context), or functions
    (called with the fixture and resolved via ``_resolve_functions``;
    unresolved functions are logged and left in place).

    The calling file's name must end in a hyphen-less UUID, e.g.
    ``foo_38de9ceec5694c96ace90c9ca37e5bcb.py``; that UUID uniquely tracks
    the Fixture through the test suite.

    **Parameters**

    :``namespace``:    dictionary to insert the generated class into
    :``base_classes``: list of classes the new class should inherit
    :``properties``:   dictionary of properties with their values

    Properties with defined behavior:

    :``description``: appended to the Fixture's description
    :``error``:       dict with ``class``, ``args``, and ``kwargs`` keys used
                      to instantiate the expected error
    :``mocks``:       dictionary mapping mock symbols to corresponding values

    The property name ``name`` is reserved and must not be used.
    '''

    # Deep copy so resolution cannot stomp on re-uses of the caller's dict.
    props = copy.deepcopy(properties)

    desc = props.pop('description', None)  # type: Union[str, None]

    caller_frame = inspect.stack()[1]
    caller_file = caller_frame[1]
    caller_module = inspect.getmodule(caller_frame[0])

    # The caller's filename carries the fixture's UUID after the last '_'.
    my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])

    class_name = _unique_class_name(namespace, my_uuid)

    # NOTE: the methods below use super(generated_class, self) rather than
    # super(self.__class__, self); the latter recurses infinitely as soon as
    # the generated class is itself subclassed.

    @property
    def description(self) -> str:
        _ = super(generated_class, self).description

        if desc is not None:
            _ += '—' + desc

        return _

    def __init__(self, context: 'torment.TestContext') -> None:
        super(generated_class, self).__init__(context)

        functions = {}

        for name, value in props.items():
            if name == 'error':
                self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
                continue

            if inspect.isclass(value):
                if issubclass(value, Fixture):
                    value = value(self.context)
                else:
                    value = value()

            if inspect.isfunction(value):
                functions[name] = value
                continue

            setattr(self, name, value)

        _resolve_functions(functions, self)

        self.initialize()

    def setup(self) -> None:
        if hasattr(self, 'mocks'):
            logger.debug('self.mocks: %s', self.mocks)

            for mock_symbol, mock_result in self.mocks.items():
                if _find_mocker(mock_symbol, self.context)():
                    _prepare_mock(self.context, mock_symbol, **mock_result)

        super(generated_class, self).setup()

    # Bind the generated class so the closures above can name it safely.
    generated_class = namespace[class_name] = type(class_name, base_classes, {
        'description': description,
        '__init__':    __init__,
        '__module__':  caller_module,
        'setup':       setup,
        'uuid':        my_uuid,
    })
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
    '''Configure the mock standing in for ``symbol`` on ``context``.

    Walks the dotted ``symbol`` from its longest prefix down to its first
    component looking for a ``mocked_``-prefixed attribute on ``context``.
    Any remaining components are resolved as attributes of that mock before
    ``return_value``/``side_effect`` are applied and the mock is reset.

    .. seealso:: :py:func:`_find_mocker`

    **Parameters**

    :``context``:      the search context
    :``symbol``:       the symbol to be located
    :``return_value``: pass through to mock ``return_value``
    :``side_effect``:  pass through to mock ``side_effect``
    '''

    parts = symbol.split('.')

    target = None
    depth = len(parts)

    # Longest matching prefix wins—try 'mocked_a_b_c' before 'mocked_a'.
    while depth > 0:
        name = 'mocked_' + '_'.join(parts[:depth]).lower()
        logger.debug('name: %s', name)

        if hasattr(context, name):
            target = getattr(context, name)
            break

        depth -= 1

    logger.debug('mock: %s', target)

    if target is not None:
        # Resolve whatever trailed the matched prefix (e.g. '.fetch').
        target = functools.reduce(getattr, parts[depth:], target)
        logger.debug('mock: %s', target)

        if return_value is not None:
            target.return_value = return_value

        if side_effect is not None:
            target.side_effect = side_effect

        target.reset_mock()
def _find_mocker(symbol: str, context: 'torment.contexts.TestContext') -> Callable[[], bool]:
'''Find method within the context that mocks symbol.
Given a symbol (i.e. ``tornado.httpclient.AsyncHTTPClient.fetch``), find
the shortest ``mock_`` method that resembles the symbol. Resembles means
the lowercased and periods replaced with underscores.
If no match is found, a dummy function (only returns False) is returned.
**Parameters**
:``symbol``: the symbol to be located
:``context``: the search context
**Return Value(s)**
The method used to mock the symbol.
**Examples**
Assuming the symbol is ``tornado.httpclient.AsyncHTTPClient.fetch``, the
first of the following methods would be returned:
* ``mock_tornado``
* ``mock_tornado_httpclient``
* ``mock_tornado_httpclient_asynchttpclient``
* ``mock_tornado_httpclient_asynchttpclient_fetch``
'''
components = []
method = None
for component in symbol.split('.'):
components.append(component.lower())
name = '_'.join([ 'mock' ] + components)
if hasattr(context, name):
method = getattr(context, name)
break
if method is None:
logger.warn('no mocker for %s', symbol)
def noop(*args, **kwargs):
return False
method = noop
return method
def _resolve_functions(functions: Dict[str, Callable[[Any], Any]], fixture: Fixture) -> None:
'''Apply functions and collect values as properties on fixture.
Call functions and apply their values as properteis on fixture.
Functions will continue to get applied until no more functions resolve.
All unresolved functions are logged and the last exception to have
occurred is also logged. This function does not return but adds the
results to fixture directly.
**Parameters**
:``functions``: dict mapping function names (property names) to
callable functions
:``fixture``: Fixture to add values to
'''
exc_info = last_function = None
function_count = float('inf')
while function_count > len(functions):
function_count = len(functions)
for name, function in copy.copy(functions).items():
try:
setattr(fixture, name, copy.deepcopy(function(fixture)))
del functions[name]
except:
exc_info = sys.exc_info()
logger.debug('name: %s', name)
logger.debug('exc_info: %s', exc_info)
last_function = name
if len(functions):
logger.warning('unprocessed Fixture properties: %s', ','.join(functions.keys()))
logger.warning('last exception from %s.%s:', fixture.name, last_function, exc_info = exc_info)
setattr(fixture, '_last_resolver_exception', ( last_function, exc_info, ))
for name, function in copy.copy(functions).items():
setattr(fixture, name, function)
|
racker/torment
|
torment/fixtures/__init__.py
|
Fixture.category
|
python
|
def category(self) -> str:
'''Fixture's category (the containing testing module name)
**Examples**
:module: test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
:category: fixtures
'''
logger.debug('dir(self.__module__): %s', dir(self.__module__))
return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')
|
Fixture's category (the containing testing module name)
**Examples**
:module: test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
:category: fixtures
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L166-L178
| null |
class Fixture(object):
'''Collection of data and actions for a particular test case.
Intended as a base class for custom fixtures. Fixture provides an API
that simplifies writing scalable test cases.
Creating Fixture objects is broken into two parts. This keeps the logic for
a class of test cases separate from the data for particular cases while
allowing re-use of the data provided by a fixture.
The first part of Fixture object creation is crafting a proper subclass that
implements the necessary actions:
:``__init__``: pre-data population initialization
:``initialize``: post-data population initialization
:``setup``: pre-run setup
:``run``: REQUIRED—run code under test
:``check``: verify results of run
.. note::
``initialize`` is run during ``__init__`` and setup is run after;
otherwise, they serve the same function. The split allows different
actions to occur in different areas of the class heirarchy and generally
isn't necessary.
By default all actions are noops and simply do nothing but run is required.
These actions allow complex class hierarchies to provide nuanced testing
behavior. For example, Fixture provides the absolute bare minimum to test
any Fixture and no more. By adding a set of subclasses, common
initialization and checks can be performed at one layer while specific run
decisions and checks can happen at a lower layer.
The second part of Fixture object creation is crafting the data. Tying data
to a Fixture class should be done with ``torment.fixtures.register``. It
provides a declarative interface that binds a dictionary to a Fixture (keys
of dictionary become Fixture properties). ``torment.fixtures.register``
creates a subclass that the rest of the torment knows how to transform into
test cases that are compatible with nose.
**Examples**
Simplest Fixture subclass:
.. code-block:: python
class MyFixture(Fixture):
pass
Of course, to be useful the Fixture needs definitions of setup, run, and
check that actually test the code we're interested in checking:
.. code-block:: python
def add(x, y):
return x + y
class AddFixture(Fixture):
def run(self):
self.result = add(self.parameters['x'], self.parameters['y'])
def check(self):
self.context.assertEqual(self.result, self.expected)
This fixture uses a couple of conventions (not requirements):
#. ``self.parameters`` as a dictionary of parameter names to values
#. ``self.expected`` as the value we expect as a result
#. ``self.result`` as the holder inside the fixture between ``run`` and
``check``
This show-cases the ridiculity of using this testing framework for simple
functions that have few cases that require testing. This framework is
designed to allow many cases to be easily and declaritively defined.
The last component required to get these fixtures to actually run is hooking
them together with a context:
.. code-block:: python
from torment import contexts
class AddUnitTest(contexts.TestContext, metaclass = contexts.MetaContext):
fixture_classes = (
MyFixture,
AddFixture,
)
The context that wraps a Fixture subclass should eventually inherit from
TestContext (which inherits from ``unittest.TestCase`` and provides its assert
methods). In order for nose to find and execute this ``TestContext``, it
must have a name that contains Test.
**Properties**
* ``category``
* ``description`` (override)
* ``name`` (do **not** override)
**Methods To Override**
* ``__init__``
* ``check``
* ``initialize``
* ``run (required)``
* ``setup``
**Instance Variables**
:``context``: the ``torment.TestContext`` this case is running in which
provides the assertion methods of ``unittest.TestCase``.
'''
def __init__(self, context: 'torment.TestContext') -> None:
'''Create Fixture
Initializes the Fixture's context (can be changed like any other
property).
**Parameters**
:``context``: a subclass of ``torment.TestContext`` that provides
assertion methods and any other environmental information
for this test case
'''
self.context = context
@property
@property
def description(self) -> str:
'''Test name in nose output (intended to be overridden).'''
return '{0.uuid.hex}—{1}'.format(self, self.context.module)
@property
def name(self) -> str:
'''Method name in nose runtime.'''
return 'test_' + self.__class__.__name__
def initialize(self) -> None:
'''Post-data population initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called during ``__init__`` and after properties have been populated by
``torment.fixtures.register``.
'''
pass
def setup(self) -> None:
'''Pre-run initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after properties have been populated by
``torment.fixtures.register``.
'''
pass
def check(self) -> None:
'''Check that run ran as expected.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after ``run`` and should be used to verify that run performed the
expected actions.
'''
pass
def _execute(self) -> None:
'''Run Fixture actions (setup, run, check).
Core test loop for Fixture. Executes setup, run, and check in order.
'''
if hasattr(self, '_last_resolver_exception'):
logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])
self.setup()
self.run()
self.check()
|
racker/torment
|
torment/fixtures/__init__.py
|
Fixture.description
|
python
|
def description(self) -> str:
'''Test name in nose output (intended to be overridden).'''
return '{0.uuid.hex}—{1}'.format(self, self.context.module)
|
Test name in nose output (intended to be overridden).
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L181-L184
| null |
class Fixture(object):
'''Collection of data and actions for a particular test case.
Intended as a base class for custom fixtures. Fixture provides an API
that simplifies writing scalable test cases.
Creating Fixture objects is broken into two parts. This keeps the logic for
a class of test cases separate from the data for particular cases while
allowing re-use of the data provided by a fixture.
The first part of Fixture object creation is crafting a proper subclass that
implements the necessary actions:
:``__init__``: pre-data population initialization
:``initialize``: post-data population initialization
:``setup``: pre-run setup
:``run``: REQUIRED—run code under test
:``check``: verify results of run
.. note::
``initialize`` is run during ``__init__`` and setup is run after;
otherwise, they serve the same function. The split allows different
actions to occur in different areas of the class heirarchy and generally
isn't necessary.
By default all actions are noops and simply do nothing but run is required.
These actions allow complex class hierarchies to provide nuanced testing
behavior. For example, Fixture provides the absolute bare minimum to test
any Fixture and no more. By adding a set of subclasses, common
initialization and checks can be performed at one layer while specific run
decisions and checks can happen at a lower layer.
The second part of Fixture object creation is crafting the data. Tying data
to a Fixture class should be done with ``torment.fixtures.register``. It
provides a declarative interface that binds a dictionary to a Fixture (keys
of dictionary become Fixture properties). ``torment.fixtures.register``
creates a subclass that the rest of the torment knows how to transform into
test cases that are compatible with nose.
**Examples**
Simplest Fixture subclass:
.. code-block:: python
class MyFixture(Fixture):
pass
Of course, to be useful the Fixture needs definitions of setup, run, and
check that actually test the code we're interested in checking:
.. code-block:: python
def add(x, y):
return x + y
class AddFixture(Fixture):
def run(self):
self.result = add(self.parameters['x'], self.parameters['y'])
def check(self):
self.context.assertEqual(self.result, self.expected)
This fixture uses a couple of conventions (not requirements):
#. ``self.parameters`` as a dictionary of parameter names to values
#. ``self.expected`` as the value we expect as a result
#. ``self.result`` as the holder inside the fixture between ``run`` and
``check``
This show-cases the ridiculity of using this testing framework for simple
functions that have few cases that require testing. This framework is
designed to allow many cases to be easily and declaritively defined.
The last component required to get these fixtures to actually run is hooking
them together with a context:
.. code-block:: python
from torment import contexts
class AddUnitTest(contexts.TestContext, metaclass = contexts.MetaContext):
fixture_classes = (
MyFixture,
AddFixture,
)
The context that wraps a Fixture subclass should eventually inherit from
TestContext (which inherits from ``unittest.TestCase`` and provides its assert
methods). In order for nose to find and execute this ``TestContext``, it
must have a name that contains Test.
**Properties**
* ``category``
* ``description`` (override)
* ``name`` (do **not** override)
**Methods To Override**
* ``__init__``
* ``check``
* ``initialize``
* ``run (required)``
* ``setup``
**Instance Variables**
:``context``: the ``torment.TestContext`` this case is running in which
provides the assertion methods of ``unittest.TestCase``.
'''
def __init__(self, context: 'torment.TestContext') -> None:
'''Create Fixture
Initializes the Fixture's context (can be changed like any other
property).
**Parameters**
:``context``: a subclass of ``torment.TestContext`` that provides
assertion methods and any other environmental information
for this test case
'''
self.context = context
@property
def category(self) -> str:
'''Fixture's category (the containing testing module name)
**Examples**
:module: test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
:category: fixtures
'''
logger.debug('dir(self.__module__): %s', dir(self.__module__))
return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')
@property
@property
def name(self) -> str:
'''Method name in nose runtime.'''
return 'test_' + self.__class__.__name__
def initialize(self) -> None:
'''Post-data population initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called during ``__init__`` and after properties have been populated by
``torment.fixtures.register``.
'''
pass
def setup(self) -> None:
'''Pre-run initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after properties have been populated by
``torment.fixtures.register``.
'''
pass
def check(self) -> None:
'''Check that run ran as expected.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after ``run`` and should be used to verify that run performed the
expected actions.
'''
pass
def _execute(self) -> None:
'''Run Fixture actions (setup, run, check).
Core test loop for Fixture. Executes setup, run, and check in order.
'''
if hasattr(self, '_last_resolver_exception'):
logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])
self.setup()
self.run()
self.check()
|
racker/torment
|
torment/fixtures/__init__.py
|
Fixture._execute
|
python
|
def _execute(self) -> None:
'''Run Fixture actions (setup, run, check).
Core test loop for Fixture. Executes setup, run, and check in order.
'''
if hasattr(self, '_last_resolver_exception'):
logger.warning('last exception from %s.%s:', self.__class__.__name__, self._last_resolver_exception[0], exc_info = self._last_resolver_exception[1])
self.setup()
self.run()
self.check()
|
Run Fixture actions (setup, run, check).
Core test loop for Fixture. Executes setup, run, and check in order.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L235-L247
| null |
class Fixture(object):
'''Collection of data and actions for a particular test case.
Intended as a base class for custom fixtures. Fixture provides an API
that simplifies writing scalable test cases.
Creating Fixture objects is broken into two parts. This keeps the logic for
a class of test cases separate from the data for particular cases while
allowing re-use of the data provided by a fixture.
The first part of Fixture object creation is crafting a proper subclass that
implements the necessary actions:
:``__init__``: pre-data population initialization
:``initialize``: post-data population initialization
:``setup``: pre-run setup
:``run``: REQUIRED—run code under test
:``check``: verify results of run
.. note::
``initialize`` is run during ``__init__`` and setup is run after;
otherwise, they serve the same function. The split allows different
actions to occur in different areas of the class heirarchy and generally
isn't necessary.
By default all actions are noops and simply do nothing but run is required.
These actions allow complex class hierarchies to provide nuanced testing
behavior. For example, Fixture provides the absolute bare minimum to test
any Fixture and no more. By adding a set of subclasses, common
initialization and checks can be performed at one layer while specific run
decisions and checks can happen at a lower layer.
The second part of Fixture object creation is crafting the data. Tying data
to a Fixture class should be done with ``torment.fixtures.register``. It
provides a declarative interface that binds a dictionary to a Fixture (keys
of dictionary become Fixture properties). ``torment.fixtures.register``
creates a subclass that the rest of the torment knows how to transform into
test cases that are compatible with nose.
**Examples**
Simplest Fixture subclass:
.. code-block:: python
class MyFixture(Fixture):
pass
Of course, to be useful the Fixture needs definitions of setup, run, and
check that actually test the code we're interested in checking:
.. code-block:: python
def add(x, y):
return x + y
class AddFixture(Fixture):
def run(self):
self.result = add(self.parameters['x'], self.parameters['y'])
def check(self):
self.context.assertEqual(self.result, self.expected)
This fixture uses a couple of conventions (not requirements):
#. ``self.parameters`` as a dictionary of parameter names to values
#. ``self.expected`` as the value we expect as a result
#. ``self.result`` as the holder inside the fixture between ``run`` and
``check``
This show-cases the ridiculity of using this testing framework for simple
functions that have few cases that require testing. This framework is
designed to allow many cases to be easily and declaritively defined.
The last component required to get these fixtures to actually run is hooking
them together with a context:
.. code-block:: python
from torment import contexts
class AddUnitTest(contexts.TestContext, metaclass = contexts.MetaContext):
fixture_classes = (
MyFixture,
AddFixture,
)
The context that wraps a Fixture subclass should eventually inherit from
TestContext (which inherits from ``unittest.TestCase`` and provides its assert
methods). In order for nose to find and execute this ``TestContext``, it
must have a name that contains Test.
**Properties**
* ``category``
* ``description`` (override)
* ``name`` (do **not** override)
**Methods To Override**
* ``__init__``
* ``check``
* ``initialize``
* ``run (required)``
* ``setup``
**Instance Variables**
:``context``: the ``torment.TestContext`` this case is running in which
provides the assertion methods of ``unittest.TestCase``.
'''
def __init__(self, context: 'torment.TestContext') -> None:
'''Create Fixture
Initializes the Fixture's context (can be changed like any other
property).
**Parameters**
:``context``: a subclass of ``torment.TestContext`` that provides
assertion methods and any other environmental information
for this test case
'''
self.context = context
@property
def category(self) -> str:
'''Fixture's category (the containing testing module name)
**Examples**
:module: test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d
:category: fixtures
'''
logger.debug('dir(self.__module__): %s', dir(self.__module__))
return self.__module__.__name__.rsplit('.', 2)[-2].replace('test_', '')
@property
def description(self) -> str:
'''Test name in nose output (intended to be overridden).'''
return '{0.uuid.hex}—{1}'.format(self, self.context.module)
@property
def name(self) -> str:
'''Method name in nose runtime.'''
return 'test_' + self.__class__.__name__
def initialize(self) -> None:
'''Post-data population initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called during ``__init__`` and after properties have been populated by
``torment.fixtures.register``.
'''
pass
def setup(self) -> None:
'''Pre-run initialization hook.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after properties have been populated by
``torment.fixtures.register``.
'''
pass
def check(self) -> None:
'''Check that run ran as expected.
.. note::
Override as necessary. Default provided so re-defenition is not
necessary.
Called after ``run`` and should be used to verify that run performed the
expected actions.
'''
pass
|
racker/torment
|
torment/fixtures/__init__.py
|
ErrorFixture.run
|
python
|
def run(self) -> None:
'''Calls sibling with exception expectation.'''
with self.context.assertRaises(self.error.__class__) as error:
super().run()
self.exception = error.exception
|
Calls sibling with exception expectation.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/fixtures/__init__.py#L283-L289
| null |
class ErrorFixture(Fixture):
'''Common error checking for Fixture.
Intended as a mixin when registering a new Fixture (via register) that will
check an error case (one throwing an exception).
**Examples**
Using the AddFixture from the Examples in Fixture, we can create a Fixture
that handles (an obviously contrived) exception by either crafting a new
Fixture object or invoking register with the appropriate base classes.
New Fixture Object:
.. code-block:: python
class ErrorAddFixture(ErrorFixture, AddFixture):
pass
Via call to register:
.. code-block:: python
register(globals(), ( ErrorFixture, AddFixture, ), { … })
'''
@property
def description(self) -> str:
'''Test name in nose output (adds error reason as result portion).'''
return super().description + ' → {0.error}'.format(self)
|
racker/torment
|
torment/helpers.py
|
evert
|
python
|
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
'''Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
'''
keys = list(itertools.chain.from_iterable([ _.keys() for _ in iterable ]))
for values in itertools.product(*[ list(*_.values()) for _ in iterable ]):
yield [ dict(( pair, )) for pair in zip(keys, values) ]
|
Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L32-L63
| null |
# Copyright 2015 Alex Brandt <alex.brandt@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import itertools
import logging
import os
import typing # noqa (use mypy typing)
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Tuple
from torment import decorators
logger = logging.getLogger(__name__)
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
'''
_ = copy.deepcopy(base)
_.update(extension)
return _
def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Merge extension into base recursively.
**Argumetnts**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
'''
_ = copy.deepcopy(base)
for key, value in extension.items():
if isinstance(value, Dict) and key in _:
_[key] = merge(_[key], value)
else:
_[key] = value
return _
@decorators.log
def import_directory(module_basename: str, directory: str, sort_key = None) -> None:
'''Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
'''
logger.info('loading submodules of %s', module_basename)
logger.info('loading modules from %s', directory)
filenames = itertools.chain(*[ [ os.path.join(_[0], filename) for filename in _[2] ] for _ in os.walk(directory) if len(_[2]) ])
modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
for modulename in sorted(modulenames, key = sort_key):
try:
importlib.import_module(modulename)
except ImportError:
logger.warning('failed loading %s', modulename)
logger.exception('module loading failure')
else:
logger.info('successfully loaded %s', modulename)
def powerset(iterable: Iterable[Any]) -> Iterable[Iterable[Any]]:
'''Powerset of iterable.
**Parameters**
:``iterable``: set to generate powerset
**Return Value(s)**
Generator that produces all subsets of iterable.
'''
s = list(iterable)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))
@decorators.log
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames
|
racker/torment
|
torment/helpers.py
|
extend
|
python
|
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
'''
_ = copy.deepcopy(base)
_.update(extension)
return _
|
Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L66-L83
| null |
# Copyright 2015 Alex Brandt <alex.brandt@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import itertools
import logging
import os
import typing # noqa (use mypy typing)
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Tuple
from torment import decorators
logger = logging.getLogger(__name__)
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
'''Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
'''
keys = list(itertools.chain.from_iterable([ _.keys() for _ in iterable ]))
for values in itertools.product(*[ list(*_.values()) for _ in iterable ]):
yield [ dict(( pair, )) for pair in zip(keys, values) ]
def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Merge extension into base recursively.
**Argumetnts**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
'''
_ = copy.deepcopy(base)
for key, value in extension.items():
if isinstance(value, Dict) and key in _:
_[key] = merge(_[key], value)
else:
_[key] = value
return _
@decorators.log
def import_directory(module_basename: str, directory: str, sort_key = None) -> None:
'''Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
'''
logger.info('loading submodules of %s', module_basename)
logger.info('loading modules from %s', directory)
filenames = itertools.chain(*[ [ os.path.join(_[0], filename) for filename in _[2] ] for _ in os.walk(directory) if len(_[2]) ])
modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
for modulename in sorted(modulenames, key = sort_key):
try:
importlib.import_module(modulename)
except ImportError:
logger.warning('failed loading %s', modulename)
logger.exception('module loading failure')
else:
logger.info('successfully loaded %s', modulename)
def powerset(iterable: Iterable[Any]) -> Iterable[Iterable[Any]]:
'''Powerset of iterable.
**Parameters**
:``iterable``: set to generate powerset
**Return Value(s)**
Generator that produces all subsets of iterable.
'''
s = list(iterable)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))
@decorators.log
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames
|
racker/torment
|
torment/helpers.py
|
merge
|
python
|
def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Merge extension into base recursively.
**Arguments**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
'''
_ = copy.deepcopy(base)
for key, value in extension.items():
if isinstance(value, Dict) and key in _:
_[key] = merge(_[key], value)
else:
_[key] = value
return _
|
Merge extension into base recursively.
**Arguments**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L86-L108
|
[
"def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:\n '''Merge extension into base recursively.\n\n **Argumetnts**\n\n :``base``: dictionary to overlay values onto\n :``extension``: dictionary to overlay with\n\n **Return Value(s)**\n\n Resulting dictionary from overlaying extension on base.\n\n '''\n\n _ = copy.deepcopy(base)\n\n for key, value in extension.items():\n if isinstance(value, Dict) and key in _:\n _[key] = merge(_[key], value)\n else:\n _[key] = value\n\n return _\n"
] |
# Copyright 2015 Alex Brandt <alex.brandt@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import itertools
import logging
import os
import typing # noqa (use mypy typing)
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Tuple
from torment import decorators
logger = logging.getLogger(__name__)
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
    '''Evert dictionaries with tuples.

    Iterates over the list of dictionaries and everts them with their tuple
    values.  For example::

        [ { 'a': ( 1, 2, ), }, ]

    becomes::

        [ ( { 'a': 1, }, ), ( { 'a': 2, }, ) ]

    The resulting iterable contains the same number of tuples as the
    initial iterable had tuple elements.  The number of dictionaries is the
    same as the cartesian product of the initial iterable's tuple elements.

    Parameters
    ----------

    :``iterable``: list of dictionaries whose values are tuples

    Return Value(s)
    ---------------

    All combinations of the choices in the dictionaries.

    '''

    all_keys = [ key for mapping in iterable for key in mapping.keys() ]
    choice_lists = [ list(*mapping.values()) for mapping in iterable ]

    for combination in itertools.product(*choice_lists):
        yield [ { key: choice } for key, choice in zip(all_keys, combination) ]
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
    '''Extend base by updating with the extension.

    **Arguments**

    :``base``:      dictionary to have keys updated or added
    :``extension``: dictionary to update base with

    **Return Value(s)**

    Resulting dictionary from updating base with extension.

    '''

    # Deep-copy so neither the result nor its nested values alias base.
    result = copy.deepcopy(base)

    for key, value in extension.items():
        result[key] = value

    return result
@decorators.log
def import_directory(module_basename: str, directory: str, sort_key = None) -> None:
'''Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
'''
logger.info('loading submodules of %s', module_basename)
logger.info('loading modules from %s', directory)
filenames = itertools.chain(*[ [ os.path.join(_[0], filename) for filename in _[2] ] for _ in os.walk(directory) if len(_[2]) ])
modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
for modulename in sorted(modulenames, key = sort_key):
try:
importlib.import_module(modulename)
except ImportError:
logger.warning('failed loading %s', modulename)
logger.exception('module loading failure')
else:
logger.info('successfully loaded %s', modulename)
def powerset(iterable: Iterable[Any]) -> Iterable[Iterable[Any]]:
'''Powerset of iterable.
**Parameters**
:``iterable``: set to generate powerset
**Return Value(s)**
Generator that produces all subsets of iterable.
'''
s = list(iterable)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))
@decorators.log
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames
|
racker/torment
|
torment/helpers.py
|
import_directory
|
python
|
def import_directory(module_basename: str, directory: str, sort_key = None) -> None:
'''Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
'''
logger.info('loading submodules of %s', module_basename)
logger.info('loading modules from %s', directory)
filenames = itertools.chain(*[ [ os.path.join(_[0], filename) for filename in _[2] ] for _ in os.walk(directory) if len(_[2]) ])
modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
for modulename in sorted(modulenames, key = sort_key):
try:
importlib.import_module(modulename)
except ImportError:
logger.warning('failed loading %s', modulename)
logger.exception('module loading failure')
else:
logger.info('successfully loaded %s', modulename)
|
Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L112-L137
|
[
"def _(function):\n @functools.wraps(function, assigned = functools.WRAPPER_ASSIGNMENTS + ( '__file__', ))\n def wrapper(*args, **kwargs):\n name, my_args = function.__name__, args\n\n if inspect.ismethod(function):\n name = function.__self__.__class__.__name__ + '.' + function.__name__\n elif len(args):\n members = dict(inspect.getmembers(args[0], predicate = lambda _: inspect.ismethod(_) and _.__name__ == function.__name__))\n logger.debug('members.keys(): %s', members.keys())\n\n if len(members):\n name, my_args = args[0].__class__.__name__ + '.' + function.__name__, args[1:]\n\n format_args = (\n prefix + name,\n ', '.join(list(map(str, my_args)) + [ ' = '.join(map(str, item)) for item in kwargs.items() ]),\n )\n\n logger.info('STARTING: %s(%s)', *format_args)\n\n try:\n return function(*args, **kwargs)\n except:\n logger.exception('EXCEPTION: %s(%s)', *format_args)\n raise\n finally:\n logger.info('STOPPING: %s(%s)', *format_args)\n\n return wrapper\n"
] |
# Copyright 2015 Alex Brandt <alex.brandt@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import itertools
import logging
import os
import typing # noqa (use mypy typing)
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Tuple
from torment import decorators
logger = logging.getLogger(__name__)
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
'''Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
'''
keys = list(itertools.chain.from_iterable([ _.keys() for _ in iterable ]))
for values in itertools.product(*[ list(*_.values()) for _ in iterable ]):
yield [ dict(( pair, )) for pair in zip(keys, values) ]
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
'''
_ = copy.deepcopy(base)
_.update(extension)
return _
def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Merge extension into base recursively.
**Arguments**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
'''
_ = copy.deepcopy(base)
for key, value in extension.items():
if isinstance(value, Dict) and key in _:
_[key] = merge(_[key], value)
else:
_[key] = value
return _
@decorators.log
def powerset(iterable: Iterable[Any]) -> Iterable[Iterable[Any]]:
'''Powerset of iterable.
**Parameters**
:``iterable``: set to generate powerset
**Return Value(s)**
Generator that produces all subsets of iterable.
'''
s = list(iterable)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))
@decorators.log
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames
|
racker/torment
|
torment/helpers.py
|
_filenames_to_modulenames
|
python
|
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames
|
Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L158-L208
| null |
# Copyright 2015 Alex Brandt <alex.brandt@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import itertools
import logging
import os
import typing # noqa (use mypy typing)
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Tuple
from torment import decorators
logger = logging.getLogger(__name__)
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
'''Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
'''
keys = list(itertools.chain.from_iterable([ _.keys() for _ in iterable ]))
for values in itertools.product(*[ list(*_.values()) for _ in iterable ]):
yield [ dict(( pair, )) for pair in zip(keys, values) ]
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
'''
_ = copy.deepcopy(base)
_.update(extension)
return _
def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Merge extension into base recursively.
**Arguments**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
'''
_ = copy.deepcopy(base)
for key, value in extension.items():
if isinstance(value, Dict) and key in _:
_[key] = merge(_[key], value)
else:
_[key] = value
return _
@decorators.log
def import_directory(module_basename: str, directory: str, sort_key = None) -> None:
'''Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
'''
logger.info('loading submodules of %s', module_basename)
logger.info('loading modules from %s', directory)
filenames = itertools.chain(*[ [ os.path.join(_[0], filename) for filename in _[2] ] for _ in os.walk(directory) if len(_[2]) ])
modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
for modulename in sorted(modulenames, key = sort_key):
try:
importlib.import_module(modulename)
except ImportError:
logger.warning('failed loading %s', modulename)
logger.exception('module loading failure')
else:
logger.info('successfully loaded %s', modulename)
def powerset(iterable: Iterable[Any]) -> Iterable[Iterable[Any]]:
'''Powerset of iterable.
**Parameters**
:``iterable``: set to generate powerset
**Return Value(s)**
Generator that produces all subsets of iterable.
'''
s = list(iterable)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))
@decorators.log
|
racker/torment
|
torment/decorators.py
|
log
|
python
|
def log(prefix = ''):
'''Add start and stop logging messages to the function.
Parameters
----------
:``prefix``: a prefix for the function name (optional)
'''
function = None
if inspect.isfunction(prefix):
prefix, function = '', prefix
def _(function):
@functools.wraps(function, assigned = functools.WRAPPER_ASSIGNMENTS + ( '__file__', ))
def wrapper(*args, **kwargs):
name, my_args = function.__name__, args
if inspect.ismethod(function):
name = function.__self__.__class__.__name__ + '.' + function.__name__
elif len(args):
members = dict(inspect.getmembers(args[0], predicate = lambda _: inspect.ismethod(_) and _.__name__ == function.__name__))
logger.debug('members.keys(): %s', members.keys())
if len(members):
name, my_args = args[0].__class__.__name__ + '.' + function.__name__, args[1:]
format_args = (
prefix + name,
', '.join(list(map(str, my_args)) + [ ' = '.join(map(str, item)) for item in kwargs.items() ]),
)
logger.info('STARTING: %s(%s)', *format_args)
try:
return function(*args, **kwargs)
except:
logger.exception('EXCEPTION: %s(%s)', *format_args)
raise
finally:
logger.info('STOPPING: %s(%s)', *format_args)
return wrapper
if function is not None:
_ = _(function)
return _
|
Add start and stop logging messages to the function.
Parameters
----------
:``prefix``: a prefix for the function name (optional)
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/decorators.py#L28-L77
| null |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import logging
import typing # noqa (use mypy typing)
from typing import Any
from typing import Callable
# Module logger: stay quiet unless the application configures handlers.
logger = logging.getLogger(__name__)
logger.propagate = False  # fixed: was misspelled 'propogate', which set a dead attribute and left propagation enabled
logger.addHandler(logging.NullHandler())
def mock(name: str) -> Callable[[Any], None]:
    '''Setup properties indicating status of name mock.

    This is designed to decorate ``torment.TestContext`` methods and is used to
    provide a consistent interface for determining if name is mocked once and
    only once.

    Parameters
    ----------

    :``name``: symbol in context's module to mock

    Return Value(s)
    ---------------

    True if name is mocked; otherwise, False.  Also, creates a property on the
    method's self, is_mocked_name, with this value.

    '''

    def _(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            logger.info('STARTING: mock ' + name)

            is_mocked = False

            # e.g. 'os.path' -> 'os_path' so it fits in an attribute name
            sanitized_name = name.replace('.', '_').strip('_')

            if name in self.mocks_mask:
                # explicitly masked by the test context: do not mock
                logger.info('STOPPING: mock ' + name + '—MASKED')
            elif getattr(self, '_is_mocked_' + sanitized_name, False):
                # already mocked on an earlier call: report success, skip re-mock
                is_mocked = True

                logger.info('STOPPING: mock ' + name + '—EXISTS')
            else:
                func(self, *args, **kwargs)

                is_mocked = True

                logger.info('STOPPING: mock ' + name)

            # record the outcome so subsequent calls can short-circuit
            setattr(self, '_is_mocked_' + sanitized_name, is_mocked)

            return is_mocked

        return wrapper

    return _
|
racker/torment
|
torment/decorators.py
|
mock
|
python
|
def mock(name: str) -> Callable[[Any], None]:
'''Setup properties indicating status of name mock.
This is designed to decorate ``torment.TestContext`` methods and is used to
provide a consistent interface for determining if name is mocked once and
only once.
Parameters
----------
:``name``: symbol in context's module to mock
Return Value(s)
---------------
True if name is mocked; otherwise, False. Also, creates a property on the
method's self, is_mocked_name, with this value.
'''
def _(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
logger.info('STARTING: mock ' + name)
is_mocked = False
sanitized_name = name.replace('.', '_').strip('_')
if name in self.mocks_mask:
logger.info('STOPPING: mock ' + name + '—MASKED')
elif getattr(self, '_is_mocked_' + sanitized_name, False):
is_mocked = True
logger.info('STOPPING: mock ' + name + '—EXISTS')
else:
func(self, *args, **kwargs)
is_mocked = True
logger.info('STOPPING: mock ' + name)
setattr(self, '_is_mocked_' + sanitized_name, is_mocked)
return is_mocked
return wrapper
return _
|
Setup properties indicating status of name mock.
This is designed to decorate ``torment.TestContext`` methods and is used to
provide a consistent interface for determining if name is mocked once and
only once.
Parameters
----------
:``name``: symbol in context's module to mock
Return Value(s)
---------------
True if name is mocked; otherwise, False. Also, creates a property on the
method's self, is_mocked_name, with this value.
|
train
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/decorators.py#L80-L128
| null |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import logging
import typing # noqa (use mypy typing)
from typing import Any
from typing import Callable
# Module logger: stay quiet unless the application configures handlers.
logger = logging.getLogger(__name__)
logger.propagate = False  # fixed: was misspelled 'propogate', which set a dead attribute and left propagation enabled
logger.addHandler(logging.NullHandler())
def log(prefix = ''):
    '''Add start and stop logging messages to the function.

    Parameters
    ----------

    :``prefix``: a prefix for the function name (optional)

    Usable both bare (``@log``) and parameterized (``@log(prefix = '...')``).
    '''

    function = None

    # Called bare (@log): 'prefix' is actually the decorated function itself.
    if inspect.isfunction(prefix):
        prefix, function = '', prefix

    def _(function):
        @functools.wraps(function, assigned = functools.WRAPPER_ASSIGNMENTS + ( '__file__', ))
        def wrapper(*args, **kwargs):
            name, my_args = function.__name__, args

            if inspect.ismethod(function):
                name = function.__self__.__class__.__name__ + '.' + function.__name__
            elif len(args):
                # Heuristic for unbound-method calls: if the first positional
                # argument exposes a bound method with this name, treat it as
                # self and qualify the logged name with its class.
                members = dict(inspect.getmembers(args[0], predicate = lambda _: inspect.ismethod(_) and _.__name__ == function.__name__))
                logger.debug('members.keys(): %s', members.keys())

                if len(members):
                    name, my_args = args[0].__class__.__name__ + '.' + function.__name__, args[1:]

            # (qualified name, rendered argument list) used by all log lines
            format_args = (
                prefix + name,
                ', '.join(list(map(str, my_args)) + [ ' = '.join(map(str, item)) for item in kwargs.items() ]),
            )

            logger.info('STARTING: %s(%s)', *format_args)

            try:
                return function(*args, **kwargs)
            except:
                logger.exception('EXCEPTION: %s(%s)', *format_args)

                raise
            finally:
                logger.info('STOPPING: %s(%s)', *format_args)

        return wrapper

    # Bare usage: apply the decorator immediately instead of returning it.
    if function is not None:
        _ = _(function)

    return _
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
DatasetBlock.split_data
|
python
|
def split_data(self):
this_gene_code = None
for seq_record in self.data.seq_records:
if this_gene_code is None or this_gene_code != seq_record.gene_code:
this_gene_code = seq_record.gene_code
self._blocks.append([])
list_length = len(self._blocks)
self._blocks[list_length - 1].append(seq_record)
|
Splits the list of SeqRecordExpanded objects into lists, which are
kept into a bigger list.
If the file_format is Nexus, then it is only partitioned by gene. If it
is FASTA, then it needs partitioning by codon positions if required.
Example:
>>> blocks = [
... [SeqRecord1, SeqRecord2], # for gene 1
... [SeqRecord1, SeqRecord2], # for gene 2
... [SeqRecord1, SeqRecord2], # for gene 3
... [SeqRecord1, SeqRecord2], # for gene 4
... ]
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L61-L84
| null |
class DatasetBlock(object):
    """Render sequence records as the data block of a phylogenetic dataset.

    By default, the data sequences block generated is NEXUS and we use BioPython
    tools to convert it to other formats such as FASTA.

    Parameters:
        data (named tuple): containing:
            * gene_codes: list
            * number_chars: string
            * number_taxa: string
            * seq_records: list of SeqRecordExpanded objects
            * gene_codes_and_lengths: OrderedDict
        codon_positions (str): str. Can be 1st, 2nd, 3rd, 1st-2nd, ALL (default).
        partitioning (str):
        aminoacids (boolean):
        degenerate (str):
        format (str): NEXUS, PHYLIP or FASTA.
        outgroup (str): Specimen code of taxon that should be used as outgroup.
    """
    # Taxonomy ranks, in the exact order they are flattened into taxon names.
    TAXONOMY_RANKS = (
        'orden', 'superfamily', 'family', 'subfamily', 'tribe', 'subtribe',
        'genus', 'species', 'subspecies', 'author', 'hostorg',
    )

    def __init__(self, data, codon_positions, partitioning, aminoacids=None,
                 degenerate=None, format=None, outgroup=None):
        self.warnings = []
        self.data = data
        self.codon_positions = codon_positions
        self.partitioning = partitioning
        self.aminoacids = aminoacids
        self.degenerate = degenerate
        self.format = format
        self.outgroup = outgroup
        self._blocks = []

    def dataset_block(self):
        """Creates the block with taxon names and their sequences.

        Override this function if the dataset block needs to be different
        due to file format.

        Example:

            CP100_10_Aus_aus   ACGATRGACGATRA...
            CP100_11_Aus_bus   ACGATRGACGATRA...
            ...
        """
        self.split_data()
        out = []
        for block in self._blocks:
            out.append(self.convert_to_string(block))
        return '\n'.join(out).strip() + '\n;\nEND;'

    def convert_to_string(self, block):
        """Makes gene_block as str from list of SeqRecordExpanded objects of a gene_code.

        Override this function if the dataset block needs to be different
        due to file format.

        This block will need to be split further if the dataset is FASTA or
        TNT and the partitioning scheme is 1st-2nd, 3rd.

        As the dataset is split into several blocks due to 1st-2nd, 3rd
        we cannot translate to aminoacids or degenerate the sequences.
        """
        if self.partitioning != '1st-2nd, 3rd':
            return self.make_datablock_by_gene(block)
        else:
            # Only FASTA output needs the per-codon-position splitting.
            if self.format == 'FASTA':
                return self.make_datablock_considering_codon_positions_as_fasta_format(block)
            else:
                return self.make_datablock_by_gene(block)

    def make_datablock_considering_codon_positions_as_fasta_format(self, block):
        """Split a gene block into per-codon-position FASTA sub-blocks."""
        block_1st2nd = OrderedDict()
        block_1st = OrderedDict()
        block_2nd = OrderedDict()
        block_3rd = OrderedDict()

        for seq_record in block:  # splitting each block per codon position
            if seq_record.gene_code not in block_1st2nd:
                block_1st2nd[seq_record.gene_code] = []
            if seq_record.gene_code not in block_1st:
                block_1st[seq_record.gene_code] = []
            if seq_record.gene_code not in block_2nd:
                block_2nd[seq_record.gene_code] = []
            if seq_record.gene_code not in block_3rd:
                block_3rd[seq_record.gene_code] = []

            taxonomy_as_string = self.flatten_taxonomy(seq_record)
            taxon_id = '>{0}{1}'.format(seq_record.voucher_code,
                                        taxonomy_as_string)
            # taxon identifiers are truncated to 54 characters
            taxon_id = taxon_id[0:54]
            block_1st2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
                                                                          seq_record.first_and_second_codon_positions()))
            block_1st[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
                                                                       seq_record.first_codon_position()))
            block_2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
                                                                       seq_record.second_codon_position()))
            block_3rd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
                                                                       seq_record.third_codon_position()))
        out = self.convert_block_dicts_to_string(block_1st2nd, block_1st, block_2nd, block_3rd)
        return out

    def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
        """Takes into account whether we need to output all codon positions."""
        out = ""

        # We need 1st and 2nd positions
        if self.codon_positions in ['ALL', '1st-2nd']:
            for gene_code, seqs in block_1st2nd.items():
                out += '>{0}_1st-2nd\n----\n'.format(gene_code)
                for seq in seqs:
                    out += seq
        elif self.codon_positions == '1st':
            for gene_code, seqs in block_1st.items():
                out += '>{0}_1st\n----\n'.format(gene_code)
                for seq in seqs:
                    out += seq
        elif self.codon_positions == '2nd':
            for gene_code, seqs in block_2nd.items():
                out += '>{0}_2nd\n----\n'.format(gene_code)
                for seq in seqs:
                    out += seq

        # We also need 3rd positions
        if self.codon_positions in ['ALL', '3rd']:
            for gene_code, seqs in block_3rd.items():
                out += '\n>{0}_3rd\n----\n'.format(gene_code)
                for seq in seqs:
                    out += seq
        return out

    def make_datablock_by_gene(self, block):
        """Render one gene block: a '[gene]' header plus padded taxon ids and sequences."""
        out = None
        for seq_record in block:
            if not out:
                out = '[{0}]\n'.format(seq_record.gene_code)
            taxonomy_as_string = self.flatten_taxonomy(seq_record)
            taxon_id = '{0}{1}'.format(seq_record.voucher_code,
                                       taxonomy_as_string)
            taxon_id = taxon_id[0:54]

            sequence = get_seq(seq_record, self.codon_positions,
                               aminoacids=self.aminoacids,
                               degenerate=self.degenerate)
            seq = sequence.seq
            if sequence.warning:
                self.warnings.append(sequence.warning)
            out += '{0}{1}\n'.format(taxon_id.ljust(55), seq)
        return out

    def flatten_taxonomy(self, seq_record):
        """Join the available taxonomy ranks into a single '_'-separated suffix.

        Missing ranks are skipped; spaces become underscores, a trailing
        underscore is dropped, and runs of underscores collapse to one.
        """
        out = ''
        if seq_record.taxonomy is None:
            return out

        # Data-driven replacement for eleven copy-pasted try/except blocks;
        # rank order (and therefore output) is unchanged.
        for rank in self.TAXONOMY_RANKS:
            try:
                out += "_" + seq_record.taxonomy[rank]
            except KeyError:
                pass

        out = out.replace(" ", "_")
        out = re.sub("_$", "", out)
        return re.sub('_+', '_', out)
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
DatasetBlock.convert_to_string
|
python
|
def convert_to_string(self, block):
if self.partitioning != '1st-2nd, 3rd':
return self.make_datablock_by_gene(block)
else:
if self.format == 'FASTA':
return self.make_datablock_considering_codon_positions_as_fasta_format(block)
else:
return self.make_datablock_by_gene(block)
|
Makes gene_block as str from list of SeqRecordExpanded objects of a gene_code.
Override this function if the dataset block needs to be different
due to file format.
This block will need to be split further if the dataset is FASTA or
TNT and the partitioning scheme is 1st-2nd, 3rd.
As the dataset is split into several blocks due to 1st-2nd, 3rd
we cannot translate to aminoacids or degenerate the sequences.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L86-L105
|
[
"def make_datablock_considering_codon_positions_as_fasta_format(self, block):\n block_1st2nd = OrderedDict()\n block_1st = OrderedDict()\n block_2nd = OrderedDict()\n block_3rd = OrderedDict()\n\n for seq_record in block: # splitting each block in two\n if seq_record.gene_code not in block_1st2nd:\n block_1st2nd[seq_record.gene_code] = []\n if seq_record.gene_code not in block_1st:\n block_1st[seq_record.gene_code] = []\n if seq_record.gene_code not in block_2nd:\n block_2nd[seq_record.gene_code] = []\n if seq_record.gene_code not in block_3rd:\n block_3rd[seq_record.gene_code] = []\n\n taxonomy_as_string = self.flatten_taxonomy(seq_record)\n taxon_id = '>{0}{1}'.format(seq_record.voucher_code,\n taxonomy_as_string)\n taxon_id = taxon_id[0:54]\n\n block_1st2nd[seq_record.gene_code].append('{0}\\n{1}\\n'.format(taxon_id,\n seq_record.first_and_second_codon_positions()))\n block_1st[seq_record.gene_code].append('{0}\\n{1}\\n'.format(taxon_id,\n seq_record.first_codon_position()))\n block_2nd[seq_record.gene_code].append('{0}\\n{1}\\n'.format(taxon_id,\n seq_record.second_codon_position()))\n block_3rd[seq_record.gene_code].append('{0}\\n{1}\\n'.format(taxon_id,\n seq_record.third_codon_position()))\n out = self.convert_block_dicts_to_string(block_1st2nd, block_1st, block_2nd, block_3rd)\n return out\n",
"def make_datablock_by_gene(self, block):\n out = None\n for seq_record in block:\n if not out:\n out = '[{0}]\\n'.format(seq_record.gene_code)\n taxonomy_as_string = self.flatten_taxonomy(seq_record)\n taxon_id = '{0}{1}'.format(seq_record.voucher_code,\n taxonomy_as_string)\n taxon_id = taxon_id[0:54]\n\n sequence = get_seq(seq_record, self.codon_positions,\n aminoacids=self.aminoacids,\n degenerate=self.degenerate)\n seq = sequence.seq\n if sequence.warning:\n self.warnings.append(sequence.warning)\n\n out += '{0}{1}\\n'.format(taxon_id.ljust(55), seq)\n return out\n"
] |
class DatasetBlock(object):
"""
By default, the data sequences block generated is NEXUS and we use BioPython
tools to convert it to other formats such as FASTA.
However, sometimes the blo
Parameters:
data (named tuple): containing:
* gene_codes: list
* number_chars: string
* number_taxa: string
* seq_records: list of SeqRecordExpanded objects
* gene_codes_and_lengths: OrderedDict
codon_positions (str): str. Can be 1st, 2nd, 3rd, 1st-2nd, ALL (default).
partitioning (str):
aminoacids (boolean):
degenerate (str):
format (str): NEXUS, PHYLIP or FASTA.
outgroup (str): Specimen code of taxon that should be used as outgroup.
"""
def __init__(self, data, codon_positions, partitioning, aminoacids=None,
degenerate=None, format=None, outgroup=None):
self.warnings = []
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.aminoacids = aminoacids
self.degenerate = degenerate
self.format = format
self.outgroup = outgroup
self._blocks = []
def dataset_block(self):
"""Creates the block with taxon names and their sequences.
Override this function if the dataset block needs to be different
due to file format.
Example:
CP100_10_Aus_aus ACGATRGACGATRA...
CP100_11_Aus_bus ACGATRGACGATRA...
...
"""
self.split_data()
out = []
for block in self._blocks:
out.append(self.convert_to_string(block))
return '\n'.join(out).strip() + '\n;\nEND;'
def split_data(self):
"""Splits the list of SeqRecordExpanded objects into lists, which are
kept into a bigger list.
If the file_format is Nexus, then it is only partitioned by gene. If it
is FASTA, then it needs partitioning by codon positions if required.
Example:
>>> blocks = [
... [SeqRecord1, SeqRecord2], # for gene 1
... [SeqRecord1, SeqRecord2], # for gene 2
... [SeqRecord1, SeqRecord2], # for gene 3
... [SeqRecord1, SeqRecord2], # for gene 4
... ]
"""
this_gene_code = None
for seq_record in self.data.seq_records:
if this_gene_code is None or this_gene_code != seq_record.gene_code:
this_gene_code = seq_record.gene_code
self._blocks.append([])
list_length = len(self._blocks)
self._blocks[list_length - 1].append(seq_record)
def make_datablock_considering_codon_positions_as_fasta_format(self, block):
block_1st2nd = OrderedDict()
block_1st = OrderedDict()
block_2nd = OrderedDict()
block_3rd = OrderedDict()
for seq_record in block: # splitting each block in two
if seq_record.gene_code not in block_1st2nd:
block_1st2nd[seq_record.gene_code] = []
if seq_record.gene_code not in block_1st:
block_1st[seq_record.gene_code] = []
if seq_record.gene_code not in block_2nd:
block_2nd[seq_record.gene_code] = []
if seq_record.gene_code not in block_3rd:
block_3rd[seq_record.gene_code] = []
taxonomy_as_string = self.flatten_taxonomy(seq_record)
taxon_id = '>{0}{1}'.format(seq_record.voucher_code,
taxonomy_as_string)
taxon_id = taxon_id[0:54]
block_1st2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.first_and_second_codon_positions()))
block_1st[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.first_codon_position()))
block_2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.second_codon_position()))
block_3rd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.third_codon_position()))
out = self.convert_block_dicts_to_string(block_1st2nd, block_1st, block_2nd, block_3rd)
return out
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
"""Takes into account whether we need to output all codon positions."""
out = ""
# We need 1st and 2nd positions
if self.codon_positions in ['ALL', '1st-2nd']:
for gene_code, seqs in block_1st2nd.items():
out += '>{0}_1st-2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '1st':
for gene_code, seqs in block_1st.items():
out += '>{0}_1st\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '2nd':
for gene_code, seqs in block_2nd.items():
out += '>{0}_2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
# We also need 3rd positions
if self.codon_positions in ['ALL', '3rd']:
for gene_code, seqs in block_3rd.items():
out += '\n>{0}_3rd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
return out
def make_datablock_by_gene(self, block):
out = None
for seq_record in block:
if not out:
out = '[{0}]\n'.format(seq_record.gene_code)
taxonomy_as_string = self.flatten_taxonomy(seq_record)
taxon_id = '{0}{1}'.format(seq_record.voucher_code,
taxonomy_as_string)
taxon_id = taxon_id[0:54]
sequence = get_seq(seq_record, self.codon_positions,
aminoacids=self.aminoacids,
degenerate=self.degenerate)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
out += '{0}{1}\n'.format(taxon_id.ljust(55), seq)
return out
def flatten_taxonomy(self, seq_record):
out = ''
if seq_record.taxonomy is None:
return out
else:
try:
out += "_" + seq_record.taxonomy['orden']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['superfamily']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['family']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['subfamily']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['tribe']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['subtribe']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['genus']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['species']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['subspecies']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['author']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['hostorg']
except KeyError:
pass
out = out.replace(" ", "_")
out = re.sub("_$", "", out)
return re.sub('_+', '_', out)
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
DatasetBlock.convert_block_dicts_to_string
|
python
|
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
out = ""
# We need 1st and 2nd positions
if self.codon_positions in ['ALL', '1st-2nd']:
for gene_code, seqs in block_1st2nd.items():
out += '>{0}_1st-2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '1st':
for gene_code, seqs in block_1st.items():
out += '>{0}_1st\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '2nd':
for gene_code, seqs in block_2nd.items():
out += '>{0}_2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
# We also need 3rd positions
if self.codon_positions in ['ALL', '3rd']:
for gene_code, seqs in block_3rd.items():
out += '\n>{0}_3rd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
return out
|
Takes into account whether we need to output all codon positions.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L139-L165
| null |
class DatasetBlock(object):
"""
By default, the data sequences block generated is NEXUS and we use BioPython
tools to convert it to other formats such as FASTA.
However, sometimes the blo
Parameters:
data (named tuple): containing:
* gene_codes: list
* number_chars: string
* number_taxa: string
* seq_records: list of SeqRecordExpanded objects
* gene_codes_and_lengths: OrderedDict
codon_positions (str): str. Can be 1st, 2nd, 3rd, 1st-2nd, ALL (default).
partitioning (str):
aminoacids (boolean):
degenerate (str):
format (str): NEXUS, PHYLIP or FASTA.
outgroup (str): Specimen code of taxon that should be used as outgroup.
"""
def __init__(self, data, codon_positions, partitioning, aminoacids=None,
degenerate=None, format=None, outgroup=None):
self.warnings = []
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.aminoacids = aminoacids
self.degenerate = degenerate
self.format = format
self.outgroup = outgroup
self._blocks = []
def dataset_block(self):
"""Creates the block with taxon names and their sequences.
Override this function if the dataset block needs to be different
due to file format.
Example:
CP100_10_Aus_aus ACGATRGACGATRA...
CP100_11_Aus_bus ACGATRGACGATRA...
...
"""
self.split_data()
out = []
for block in self._blocks:
out.append(self.convert_to_string(block))
return '\n'.join(out).strip() + '\n;\nEND;'
def split_data(self):
"""Splits the list of SeqRecordExpanded objects into lists, which are
kept into a bigger list.
If the file_format is Nexus, then it is only partitioned by gene. If it
is FASTA, then it needs partitioning by codon positions if required.
Example:
>>> blocks = [
... [SeqRecord1, SeqRecord2], # for gene 1
... [SeqRecord1, SeqRecord2], # for gene 2
... [SeqRecord1, SeqRecord2], # for gene 3
... [SeqRecord1, SeqRecord2], # for gene 4
... ]
"""
this_gene_code = None
for seq_record in self.data.seq_records:
if this_gene_code is None or this_gene_code != seq_record.gene_code:
this_gene_code = seq_record.gene_code
self._blocks.append([])
list_length = len(self._blocks)
self._blocks[list_length - 1].append(seq_record)
def convert_to_string(self, block):
"""Makes gene_block as str from list of SeqRecordExpanded objects of a gene_code.
Override this function if the dataset block needs to be different
due to file format.
This block will need to be split further if the dataset is FASTA or
TNT and the partitioning scheme is 1st-2nd, 3rd.
As the dataset is split into several blocks due to 1st-2nd, 3rd
we cannot translate to aminoacids or degenerate the sequences.
"""
if self.partitioning != '1st-2nd, 3rd':
return self.make_datablock_by_gene(block)
else:
if self.format == 'FASTA':
return self.make_datablock_considering_codon_positions_as_fasta_format(block)
else:
return self.make_datablock_by_gene(block)
def make_datablock_considering_codon_positions_as_fasta_format(self, block):
block_1st2nd = OrderedDict()
block_1st = OrderedDict()
block_2nd = OrderedDict()
block_3rd = OrderedDict()
for seq_record in block: # splitting each block in two
if seq_record.gene_code not in block_1st2nd:
block_1st2nd[seq_record.gene_code] = []
if seq_record.gene_code not in block_1st:
block_1st[seq_record.gene_code] = []
if seq_record.gene_code not in block_2nd:
block_2nd[seq_record.gene_code] = []
if seq_record.gene_code not in block_3rd:
block_3rd[seq_record.gene_code] = []
taxonomy_as_string = self.flatten_taxonomy(seq_record)
taxon_id = '>{0}{1}'.format(seq_record.voucher_code,
taxonomy_as_string)
taxon_id = taxon_id[0:54]
block_1st2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.first_and_second_codon_positions()))
block_1st[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.first_codon_position()))
block_2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.second_codon_position()))
block_3rd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.third_codon_position()))
out = self.convert_block_dicts_to_string(block_1st2nd, block_1st, block_2nd, block_3rd)
return out
def make_datablock_by_gene(self, block):
out = None
for seq_record in block:
if not out:
out = '[{0}]\n'.format(seq_record.gene_code)
taxonomy_as_string = self.flatten_taxonomy(seq_record)
taxon_id = '{0}{1}'.format(seq_record.voucher_code,
taxonomy_as_string)
taxon_id = taxon_id[0:54]
sequence = get_seq(seq_record, self.codon_positions,
aminoacids=self.aminoacids,
degenerate=self.degenerate)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
out += '{0}{1}\n'.format(taxon_id.ljust(55), seq)
return out
def flatten_taxonomy(self, seq_record):
out = ''
if seq_record.taxonomy is None:
return out
else:
try:
out += "_" + seq_record.taxonomy['orden']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['superfamily']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['family']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['subfamily']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['tribe']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['subtribe']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['genus']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['species']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['subspecies']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['author']
except KeyError:
pass
try:
out += "_" + seq_record.taxonomy['hostorg']
except KeyError:
pass
out = out.replace(" ", "_")
out = re.sub("_$", "", out)
return re.sub('_+', '_', out)
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
DatasetFooter.make_charsets
|
python
|
def make_charsets(self):
count_start = 1
out = ''
for gene_code, lengths in self.data.gene_codes_and_lengths.items():
count_end = lengths[0] + count_start - 1
out += self.format_charset_line(gene_code, count_start, count_end)
count_start = count_end + 1
return out
|
Override this function for Phylip dataset as the content is different and
goes into a separate file.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L308-L319
|
[
"def format_charset_line(self, gene_code, count_start, count_end):\n slash_number = self.make_slash_number()\n suffixes = self.make_gene_code_suffixes()\n corrected_count = self.correct_count_using_reading_frames(gene_code, count_start, count_end)\n\n out = ''\n for index, val in enumerate(suffixes):\n out += ' charset {0}{1} = {2}{3};\\n'.format(gene_code, suffixes[index],\n corrected_count[index], slash_number)\n return out\n"
] |
class DatasetFooter(object):
"""Builds charset block:
Parameters:
data (namedtuple): with necessary info for dataset creation.
codon_positions (str): `1st`, `2nd`, `3rd`, `1st-2nd`, `ALL`.
partitioning (str): `by gene`, `by codon position`, `1st-2nd, 3rd`.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Example:
>>>
begin mrbayes;
charset ArgKin = 1-596;
charset COI-begin = 597-1265;
charset COI_end = 1266-2071;
charset ef1a = 2072-3311;
charset RpS2 = 3312-3722;
charset RpS5 = 3723-4339;
charset wingless = 4340-4739;
set autoclose=yes;
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
"""
def __init__(self, data, codon_positions=None, partitioning=None,
outgroup=None):
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.outgroup = outgroup
self._validate_partitioning(partitioning)
self._validate_codon_positions(codon_positions)
self.charset_block = self.make_charset_block()
self.partition_line = self.make_partition_line()
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def make_charset_block(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
out = 'begin mrbayes;\n'
out += self.make_charsets()
return out.strip()
def format_charset_line(self, gene_code, count_start, count_end):
slash_number = self.make_slash_number()
suffixes = self.make_gene_code_suffixes()
corrected_count = self.correct_count_using_reading_frames(gene_code, count_start, count_end)
out = ''
for index, val in enumerate(suffixes):
out += ' charset {0}{1} = {2}{3};\n'.format(gene_code, suffixes[index],
corrected_count[index], slash_number)
return out
def make_slash_number(self):
"""
Charset lines have \2 or \3 depending on type of partitioning and codon
positions requested for our dataset.
:return:
"""
if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
return '\\2'
elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
return '\\3'
else:
return ''
def make_gene_code_suffixes(self):
try:
return self.suffix_for_one_codon_position()
except KeyError:
return self.suffix_for_several_codon_positions()
def suffix_for_one_codon_position(self):
sufixes = {
'1st': '_pos1',
'2nd': '_pos2',
'3rd': '_pos3',
}
return [sufixes[self.codon_positions]]
def suffix_for_several_codon_positions(self):
if self.codon_positions == 'ALL' and self.partitioning == 'by gene':
return ['']
elif self.codon_positions == '1st-2nd' and self.partitioning in ['by gene', '1st-2nd, 3rd']:
return ['_pos12']
elif self.codon_positions == '1st-2nd' and self.partitioning == 'by codon position':
return ['_pos1', '_pos2']
if self.partitioning == 'by codon position':
return ['_pos1', '_pos2', '_pos3']
elif self.partitioning == '1st-2nd, 3rd':
return ['_pos12', '_pos3']
def correct_count_using_reading_frames(self, gene_code, count_start, count_end):
reading_frame = self.data.reading_frames[gene_code]
bp = BasePairCount(reading_frame, self.codon_positions, self.partitioning, count_start, count_end)
return bp.get_corrected_count()
def make_partition_line(self):
out = 'partition GENES = {0}: '.format(len(self.data.gene_codes) * len(self.make_gene_code_suffixes()))
out += ', '.join(self.add_suffixes_to_gene_codes())
out += ';'
out += '\n\nset partition = GENES;'
return out
def add_suffixes_to_gene_codes(self):
"""Appends pos1, pos2, etc to the gene_code if needed."""
out = []
for gene_code in self.data.gene_codes:
for sufix in self.make_gene_code_suffixes():
out.append('{0}{1}'.format(gene_code, sufix))
return out
def dataset_footer(self):
return self.make_footer()
def make_footer(self):
outgroup = self.get_outgroup()
footer = """{0}\n{1}
set autoclose=yes;{2}
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
""".format(self.charset_block, self.partition_line, outgroup)
return footer.strip()
def get_outgroup(self):
"""Generates the outgroup line from the voucher code specified by the
user.
"""
if self.outgroup is not None:
outgroup_taxonomy = ''
for i in self.data.seq_records:
if self.outgroup == i.voucher_code:
outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],
i.taxonomy['species'])
break
outgroup = '\noutgroup {0}_{1};'.format(self.outgroup,
outgroup_taxonomy)
else:
outgroup = ''
return outgroup
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
DatasetFooter.make_slash_number
|
python
|
def make_slash_number(self):
if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
return '\\2'
elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
return '\\3'
else:
return ''
|
Charset lines have \2 or \3 depending on type of partitioning and codon
positions requested for our dataset.
:return:
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L332-L344
| null |
class DatasetFooter(object):
"""Builds charset block:
Parameters:
data (namedtuple): with necessary info for dataset creation.
codon_positions (str): `1st`, `2nd`, `3rd`, `1st-2nd`, `ALL`.
partitioning (str): `by gene`, `by codon position`, `1st-2nd, 3rd`.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Example:
>>>
begin mrbayes;
charset ArgKin = 1-596;
charset COI-begin = 597-1265;
charset COI_end = 1266-2071;
charset ef1a = 2072-3311;
charset RpS2 = 3312-3722;
charset RpS5 = 3723-4339;
charset wingless = 4340-4739;
set autoclose=yes;
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
"""
def __init__(self, data, codon_positions=None, partitioning=None,
outgroup=None):
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.outgroup = outgroup
self._validate_partitioning(partitioning)
self._validate_codon_positions(codon_positions)
self.charset_block = self.make_charset_block()
self.partition_line = self.make_partition_line()
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def make_charset_block(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
out = 'begin mrbayes;\n'
out += self.make_charsets()
return out.strip()
def make_charsets(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
count_start = 1
out = ''
for gene_code, lengths in self.data.gene_codes_and_lengths.items():
count_end = lengths[0] + count_start - 1
out += self.format_charset_line(gene_code, count_start, count_end)
count_start = count_end + 1
return out
def format_charset_line(self, gene_code, count_start, count_end):
slash_number = self.make_slash_number()
suffixes = self.make_gene_code_suffixes()
corrected_count = self.correct_count_using_reading_frames(gene_code, count_start, count_end)
out = ''
for index, val in enumerate(suffixes):
out += ' charset {0}{1} = {2}{3};\n'.format(gene_code, suffixes[index],
corrected_count[index], slash_number)
return out
def make_gene_code_suffixes(self):
try:
return self.suffix_for_one_codon_position()
except KeyError:
return self.suffix_for_several_codon_positions()
def suffix_for_one_codon_position(self):
sufixes = {
'1st': '_pos1',
'2nd': '_pos2',
'3rd': '_pos3',
}
return [sufixes[self.codon_positions]]
def suffix_for_several_codon_positions(self):
if self.codon_positions == 'ALL' and self.partitioning == 'by gene':
return ['']
elif self.codon_positions == '1st-2nd' and self.partitioning in ['by gene', '1st-2nd, 3rd']:
return ['_pos12']
elif self.codon_positions == '1st-2nd' and self.partitioning == 'by codon position':
return ['_pos1', '_pos2']
if self.partitioning == 'by codon position':
return ['_pos1', '_pos2', '_pos3']
elif self.partitioning == '1st-2nd, 3rd':
return ['_pos12', '_pos3']
def correct_count_using_reading_frames(self, gene_code, count_start, count_end):
reading_frame = self.data.reading_frames[gene_code]
bp = BasePairCount(reading_frame, self.codon_positions, self.partitioning, count_start, count_end)
return bp.get_corrected_count()
def make_partition_line(self):
out = 'partition GENES = {0}: '.format(len(self.data.gene_codes) * len(self.make_gene_code_suffixes()))
out += ', '.join(self.add_suffixes_to_gene_codes())
out += ';'
out += '\n\nset partition = GENES;'
return out
def add_suffixes_to_gene_codes(self):
"""Appends pos1, pos2, etc to the gene_code if needed."""
out = []
for gene_code in self.data.gene_codes:
for sufix in self.make_gene_code_suffixes():
out.append('{0}{1}'.format(gene_code, sufix))
return out
def dataset_footer(self):
return self.make_footer()
def make_footer(self):
outgroup = self.get_outgroup()
footer = """{0}\n{1}
set autoclose=yes;{2}
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
""".format(self.charset_block, self.partition_line, outgroup)
return footer.strip()
def get_outgroup(self):
"""Generates the outgroup line from the voucher code specified by the
user.
"""
if self.outgroup is not None:
outgroup_taxonomy = ''
for i in self.data.seq_records:
if self.outgroup == i.voucher_code:
outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],
i.taxonomy['species'])
break
outgroup = '\noutgroup {0}_{1};'.format(self.outgroup,
outgroup_taxonomy)
else:
outgroup = ''
return outgroup
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
DatasetFooter.add_suffixes_to_gene_codes
|
python
|
def add_suffixes_to_gene_codes(self):
out = []
for gene_code in self.data.gene_codes:
for sufix in self.make_gene_code_suffixes():
out.append('{0}{1}'.format(gene_code, sufix))
return out
|
Appends pos1, pos2, etc to the gene_code if needed.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L385-L391
|
[
"def make_gene_code_suffixes(self):\n try:\n return self.suffix_for_one_codon_position()\n except KeyError:\n return self.suffix_for_several_codon_positions()\n"
] |
class DatasetFooter(object):
"""Builds charset block:
Parameters:
data (namedtuple): with necessary info for dataset creation.
codon_positions (str): `1st`, `2nd`, `3rd`, `1st-2nd`, `ALL`.
partitioning (str): `by gene`, `by codon position`, `1st-2nd, 3rd`.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Example:
>>>
begin mrbayes;
charset ArgKin = 1-596;
charset COI-begin = 597-1265;
charset COI_end = 1266-2071;
charset ef1a = 2072-3311;
charset RpS2 = 3312-3722;
charset RpS5 = 3723-4339;
charset wingless = 4340-4739;
set autoclose=yes;
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
"""
def __init__(self, data, codon_positions=None, partitioning=None,
outgroup=None):
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.outgroup = outgroup
self._validate_partitioning(partitioning)
self._validate_codon_positions(codon_positions)
self.charset_block = self.make_charset_block()
self.partition_line = self.make_partition_line()
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def make_charset_block(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
out = 'begin mrbayes;\n'
out += self.make_charsets()
return out.strip()
def make_charsets(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
count_start = 1
out = ''
for gene_code, lengths in self.data.gene_codes_and_lengths.items():
count_end = lengths[0] + count_start - 1
out += self.format_charset_line(gene_code, count_start, count_end)
count_start = count_end + 1
return out
def format_charset_line(self, gene_code, count_start, count_end):
slash_number = self.make_slash_number()
suffixes = self.make_gene_code_suffixes()
corrected_count = self.correct_count_using_reading_frames(gene_code, count_start, count_end)
out = ''
for index, val in enumerate(suffixes):
out += ' charset {0}{1} = {2}{3};\n'.format(gene_code, suffixes[index],
corrected_count[index], slash_number)
return out
def make_slash_number(self):
"""
Charset lines have \2 or \3 depending on type of partitioning and codon
positions requested for our dataset.
:return:
"""
if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
return '\\2'
elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
return '\\3'
else:
return ''
def make_gene_code_suffixes(self):
try:
return self.suffix_for_one_codon_position()
except KeyError:
return self.suffix_for_several_codon_positions()
def suffix_for_one_codon_position(self):
sufixes = {
'1st': '_pos1',
'2nd': '_pos2',
'3rd': '_pos3',
}
return [sufixes[self.codon_positions]]
def suffix_for_several_codon_positions(self):
if self.codon_positions == 'ALL' and self.partitioning == 'by gene':
return ['']
elif self.codon_positions == '1st-2nd' and self.partitioning in ['by gene', '1st-2nd, 3rd']:
return ['_pos12']
elif self.codon_positions == '1st-2nd' and self.partitioning == 'by codon position':
return ['_pos1', '_pos2']
if self.partitioning == 'by codon position':
return ['_pos1', '_pos2', '_pos3']
elif self.partitioning == '1st-2nd, 3rd':
return ['_pos12', '_pos3']
def correct_count_using_reading_frames(self, gene_code, count_start, count_end):
reading_frame = self.data.reading_frames[gene_code]
bp = BasePairCount(reading_frame, self.codon_positions, self.partitioning, count_start, count_end)
return bp.get_corrected_count()
def make_partition_line(self):
out = 'partition GENES = {0}: '.format(len(self.data.gene_codes) * len(self.make_gene_code_suffixes()))
out += ', '.join(self.add_suffixes_to_gene_codes())
out += ';'
out += '\n\nset partition = GENES;'
return out
def dataset_footer(self):
return self.make_footer()
def make_footer(self):
outgroup = self.get_outgroup()
footer = """{0}\n{1}
set autoclose=yes;{2}
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
""".format(self.charset_block, self.partition_line, outgroup)
return footer.strip()
def get_outgroup(self):
"""Generates the outgroup line from the voucher code specified by the
user.
"""
if self.outgroup is not None:
outgroup_taxonomy = ''
for i in self.data.seq_records:
if self.outgroup == i.voucher_code:
outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],
i.taxonomy['species'])
break
outgroup = '\noutgroup {0}_{1};'.format(self.outgroup,
outgroup_taxonomy)
else:
outgroup = ''
return outgroup
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
DatasetFooter.get_outgroup
|
python
|
def get_outgroup(self):
    """Build the NEXUS/TNT outgroup line for the user-selected voucher code.

    Returns an empty string when no outgroup was requested. When the
    voucher code is not found among the sequence records, the taxonomy
    part of the label is left empty.
    """
    if self.outgroup is None:
        return ''
    taxonomy_label = ''
    for record in self.data.seq_records:
        if record.voucher_code == self.outgroup:
            taxonomy_label = '{0}_{1}'.format(record.taxonomy['genus'],
                                              record.taxonomy['species'])
            break
    return '\noutgroup {0}_{1};'.format(self.outgroup, taxonomy_label)
|
Generates the outgroup line from the voucher code specified by the
user.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L413-L428
| null |
class DatasetFooter(object):
"""Builds charset block:
Parameters:
data (namedtuple): with necessary info for dataset creation.
codon_positions (str): `1st`, `2nd`, `3rd`, `1st-2nd`, `ALL`.
partitioning (str): `by gene`, `by codon position`, `1st-2nd, 3rd`.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Example:
>>>
begin mrbayes;
charset ArgKin = 1-596;
charset COI-begin = 597-1265;
charset COI_end = 1266-2071;
charset ef1a = 2072-3311;
charset RpS2 = 3312-3722;
charset RpS5 = 3723-4339;
charset wingless = 4340-4739;
set autoclose=yes;
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
"""
def __init__(self, data, codon_positions=None, partitioning=None,
outgroup=None):
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.outgroup = outgroup
self._validate_partitioning(partitioning)
self._validate_codon_positions(codon_positions)
self.charset_block = self.make_charset_block()
self.partition_line = self.make_partition_line()
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def make_charset_block(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
out = 'begin mrbayes;\n'
out += self.make_charsets()
return out.strip()
def make_charsets(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
count_start = 1
out = ''
for gene_code, lengths in self.data.gene_codes_and_lengths.items():
count_end = lengths[0] + count_start - 1
out += self.format_charset_line(gene_code, count_start, count_end)
count_start = count_end + 1
return out
def format_charset_line(self, gene_code, count_start, count_end):
slash_number = self.make_slash_number()
suffixes = self.make_gene_code_suffixes()
corrected_count = self.correct_count_using_reading_frames(gene_code, count_start, count_end)
out = ''
for index, val in enumerate(suffixes):
out += ' charset {0}{1} = {2}{3};\n'.format(gene_code, suffixes[index],
corrected_count[index], slash_number)
return out
def make_slash_number(self):
"""
Charset lines have \2 or \3 depending on type of partitioning and codon
positions requested for our dataset.
:return:
"""
if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
return '\\2'
elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
return '\\3'
else:
return ''
def make_gene_code_suffixes(self):
try:
return self.suffix_for_one_codon_position()
except KeyError:
return self.suffix_for_several_codon_positions()
def suffix_for_one_codon_position(self):
sufixes = {
'1st': '_pos1',
'2nd': '_pos2',
'3rd': '_pos3',
}
return [sufixes[self.codon_positions]]
def suffix_for_several_codon_positions(self):
if self.codon_positions == 'ALL' and self.partitioning == 'by gene':
return ['']
elif self.codon_positions == '1st-2nd' and self.partitioning in ['by gene', '1st-2nd, 3rd']:
return ['_pos12']
elif self.codon_positions == '1st-2nd' and self.partitioning == 'by codon position':
return ['_pos1', '_pos2']
if self.partitioning == 'by codon position':
return ['_pos1', '_pos2', '_pos3']
elif self.partitioning == '1st-2nd, 3rd':
return ['_pos12', '_pos3']
def correct_count_using_reading_frames(self, gene_code, count_start, count_end):
reading_frame = self.data.reading_frames[gene_code]
bp = BasePairCount(reading_frame, self.codon_positions, self.partitioning, count_start, count_end)
return bp.get_corrected_count()
def make_partition_line(self):
out = 'partition GENES = {0}: '.format(len(self.data.gene_codes) * len(self.make_gene_code_suffixes()))
out += ', '.join(self.add_suffixes_to_gene_codes())
out += ';'
out += '\n\nset partition = GENES;'
return out
def add_suffixes_to_gene_codes(self):
"""Appends pos1, pos2, etc to the gene_code if needed."""
out = []
for gene_code in self.data.gene_codes:
for sufix in self.make_gene_code_suffixes():
out.append('{0}{1}'.format(gene_code, sufix))
return out
def dataset_footer(self):
return self.make_footer()
def make_footer(self):
outgroup = self.get_outgroup()
footer = """{0}\n{1}
set autoclose=yes;{2}
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
""".format(self.charset_block, self.partition_line, outgroup)
return footer.strip()
|
carlosp420/dataset-creator
|
dataset_creator/phylip.py
|
PhylipDatasetFooter.make_charsets
|
python
|
def make_charsets(self):
    """Build RAxML-style partition lines (``DNA, gene = start-end``).

    Overridden for Phylip datasets: the charset info goes into a separate
    file, so the NEXUS ``charset`` lines produced by ``format_charset_line``
    are rewritten with a ``DNA,`` prefix and their trailing semicolon removed.
    """
    pieces = []
    start = 1
    for gene_code, lengths in self.data.gene_codes_and_lengths.items():
        end = lengths[0] + start - 1
        nexus_line = self.format_charset_line(gene_code, start, end)
        pieces.append(nexus_line.replace(' charset', 'DNA,').replace(';', ''))
        start = end + 1
    return ''.join(pieces)
|
Overridden function for Phylip dataset as the content is different and
goes into a separate file.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/phylip.py#L13-L26
|
[
"def format_charset_line(self, gene_code, count_start, count_end):\n slash_number = self.make_slash_number()\n suffixes = self.make_gene_code_suffixes()\n corrected_count = self.correct_count_using_reading_frames(gene_code, count_start, count_end)\n\n out = ''\n for index, val in enumerate(suffixes):\n out += ' charset {0}{1} = {2}{3};\\n'.format(gene_code, suffixes[index],\n corrected_count[index], slash_number)\n return out\n"
] |
class PhylipDatasetFooter(DatasetFooter):
def make_charset_block(self):
"""
Overridden function for Phylip dataset as the content is different and
goes into a separate file.
"""
out = self.make_charsets()
return out.strip()
|
carlosp420/dataset-creator
|
dataset_creator/genbank_fasta.py
|
GenBankFASTADatasetBlock.convert_to_string
|
python
|
def convert_to_string(self, block):
    """Render one gene block of SeqRecordExpanded objects as GenBank FASTA.

    Sequence warnings reported by ``get_seq`` are accumulated on
    ``self.warnings``.

    :param block: list of SeqRecordExpanded objects for a single gene_code.
    :return: str.
    """
    chunks = []
    for seq_record in block:
        header = ">{0}_{1}_{2} [org={0} {1}] [Specimen-voucher={2}] " \
                 "[note={3} gene, partial cds.] [Lineage={4}]".format(
                     seq_record.taxonomy['genus'],
                     seq_record.taxonomy['species'],
                     seq_record.voucher_code,
                     seq_record.gene_code,
                     seq_record.lineage,
                 )
        sequence = get_seq(seq_record, self.codon_positions, self.aminoacids,
                           self.degenerate)
        if sequence.warning:
            self.warnings.append(sequence.warning)
        seq = sequence.seq
        # wrap the sequence at 60 characters per line, FASTA convention
        width = 60
        wrapped = [seq[i:i + width] for i in range(0, len(seq), width)]
        chunks.append('{0}\n{1}\n'.format(header, "\n".join(wrapped)))
    return ''.join(chunks)
|
Takes a list of SeqRecordExpanded objects corresponding to a gene_code
and produces the gene_block as string.
:param block:
:return: str.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/genbank_fasta.py#L6-L33
|
[
"def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None):\n \"\"\"\n Checks parameters such as codon_positions, aminoacids... to return the\n required sequence as string.\n\n Parameters:\n seq_record (SeqRecordExpanded object):\n codon_positions (str):\n aminoacids (boolean):\n\n Returns:\n Namedtuple containing ``seq (str)`` and ``warning (str)``.\n \"\"\"\n Sequence = namedtuple('Sequence', ['seq', 'warning'])\n\n if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']:\n raise WrongParameterFormat(\"`codon_positions` argument should be any of the following\"\n \": 1st, 2nd, 3rd, 1st-2nd or ALL\")\n if aminoacids:\n aa = seq_record.translate()\n if '*' in aa:\n warning = \"Gene {0}, sequence {1} contains stop codons '*'\".format(seq_record.gene_code,\n seq_record.voucher_code)\n else:\n warning = None\n return Sequence(seq=aa, warning=warning)\n\n if degenerate:\n return Sequence(seq=seq_record.degenerate(degenerate), warning=None)\n\n if codon_positions == '1st':\n return Sequence(seq=seq_record.first_codon_position(), warning=None)\n elif codon_positions == '2nd':\n return Sequence(seq=seq_record.second_codon_position(), warning=None)\n elif codon_positions == '3rd':\n return Sequence(seq=seq_record.third_codon_position(), warning=None)\n elif codon_positions == '1st-2nd':\n return Sequence(seq=seq_record.first_and_second_codon_positions(), warning=None)\n else: # None and ALL\n return Sequence(seq=str(seq_record.seq), warning=None)\n"
] |
class GenBankFASTADatasetBlock(DatasetBlock):
|
carlosp420/dataset-creator
|
dataset_creator/utils.py
|
get_seq
|
python
|
def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None):
    """Return the requested view of *seq_record*'s sequence.

    Checks parameters such as codon_positions, aminoacids, degenerate to
    pick the required sequence representation.

    Parameters:
        seq_record (SeqRecordExpanded object):
        codon_positions (str): None, '1st', '2nd', '3rd', '1st-2nd' or 'ALL'.
        aminoacids (boolean): translate to aminoacids when True.
        degenerate (str): degeneration method; takes precedence over
            codon-position extraction.

    Returns:
        Namedtuple containing ``seq (str)`` and ``warning (str)``.
    """
    Sequence = namedtuple('Sequence', ['seq', 'warning'])

    if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']:
        raise WrongParameterFormat("`codon_positions` argument should be any of the following"
                                   ": 1st, 2nd, 3rd, 1st-2nd or ALL")

    if aminoacids:
        translated = seq_record.translate()
        warning = None
        if '*' in translated:
            warning = "Gene {0}, sequence {1} contains stop codons '*'".format(
                seq_record.gene_code, seq_record.voucher_code)
        return Sequence(seq=translated, warning=warning)

    if degenerate:
        return Sequence(seq=seq_record.degenerate(degenerate), warning=None)

    if codon_positions == '1st':
        chosen = seq_record.first_codon_position()
    elif codon_positions == '2nd':
        chosen = seq_record.second_codon_position()
    elif codon_positions == '3rd':
        chosen = seq_record.third_codon_position()
    elif codon_positions == '1st-2nd':
        chosen = seq_record.first_and_second_codon_positions()
    else:  # None and ALL
        chosen = str(seq_record.seq)
    return Sequence(seq=chosen, warning=None)
|
Checks parameters such as codon_positions, aminoacids... to return the
required sequence as string.
Parameters:
seq_record (SeqRecordExpanded object):
codon_positions (str):
aminoacids (boolean):
Returns:
Namedtuple containing ``seq (str)`` and ``warning (str)``.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/utils.py#L17-L56
| null |
# -*- coding: UTF-8 -*-
import six
if six.PY2:
from StringIO import StringIO
else:
from io import StringIO
import os
from collections import namedtuple
import uuid
from Bio import AlignIO
from .exceptions import WrongParameterFormat
def convert_nexus_to_format(dataset_as_nexus, dataset_format):
"""
Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return:
"""
fake_handle = StringIO(dataset_as_nexus)
nexus_al = AlignIO.parse(fake_handle, 'nexus')
tmp_file = make_random_filename()
AlignIO.write(nexus_al, tmp_file, dataset_format)
dataset_as_fasta = read_and_delete_tmp_file(tmp_file)
return dataset_as_fasta
def make_random_filename():
    """Return a unique temporary filename of the form ``<32 hex chars>.txt``."""
    return uuid.uuid4().hex + '.txt'
def read_and_delete_tmp_file(filename):
    """Read *filename*, remove it from disk, and return its contents."""
    handle = open(filename, "r")
    try:
        contents = handle.read()
    finally:
        handle.close()
    # best-effort cleanup; skip silently if the file is already gone
    if os.path.isfile(filename):
        os.remove(filename)
    return contents
def make_dataset_header(data, file_format, aminoacids):
"""Creates the dataset header for NEXUS files from ``#NEXUS`` to ``MATRIX``.
Parameters:
data (namedtuple): with necessary info for dataset creation.
file_format (str): TNT, PHYLIP, NEXUS, FASTA
aminoacids (boolean): If ``aminoacids is True`` the header will show
``DATATYPE=PROTEIN`` otherwise it will be ``DNA``.
"""
if aminoacids:
datatype = 'PROTEIN'
else:
datatype = 'DNA'
if file_format in ['NEXUS', 'PHYLIP', 'FASTA']:
header = """
#NEXUS
BEGIN DATA;
DIMENSIONS NTAX={0} NCHAR={1};
FORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-;
MATRIX
""".format(data.number_taxa, data.number_chars, datatype)
elif file_format == 'MEGA':
return "#MEGA\n!TITLE title;"
else: # file_format: TNT
if aminoacids:
molecule_type = "prot"
else:
molecule_type = "dna"
header = """
nstates {0};
xread
{1} {2}""".format(molecule_type, data.number_chars, data.number_taxa)
return header.strip()
|
carlosp420/dataset-creator
|
dataset_creator/utils.py
|
convert_nexus_to_format
|
python
|
def convert_nexus_to_format(dataset_as_nexus, dataset_format):
    """Convert a NEXUS dataset string to another format using Biopython.

    The alignment is round-tripped through a uniquely-named temporary file,
    which is deleted after reading.

    :param dataset_as_nexus: dataset as a NEXUS-formatted string.
    :param dataset_format: Biopython format name (e.g. ``phylip``, ``fasta``).
    :return: converted dataset as a string.
    """
    nexus_handle = StringIO(dataset_as_nexus)
    alignment = AlignIO.parse(nexus_handle, 'nexus')
    tmp_filename = make_random_filename()
    AlignIO.write(alignment, tmp_filename, dataset_format)
    return read_and_delete_tmp_file(tmp_filename)
|
Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return:
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/utils.py#L59-L72
|
[
"def make_random_filename():\n return '{0}.txt'.format(uuid.uuid4().hex)\n",
"def read_and_delete_tmp_file(filename):\n with open(filename, \"r\") as handle:\n contents = handle.read()\n\n if os.path.isfile(filename):\n os.remove(filename)\n\n return contents\n"
] |
# -*- coding: UTF-8 -*-
import six
if six.PY2:
from StringIO import StringIO
else:
from io import StringIO
import os
from collections import namedtuple
import uuid
from Bio import AlignIO
from .exceptions import WrongParameterFormat
def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None):
"""
Checks parameters such as codon_positions, aminoacids... to return the
required sequence as string.
Parameters:
seq_record (SeqRecordExpanded object):
codon_positions (str):
aminoacids (boolean):
Returns:
Namedtuple containing ``seq (str)`` and ``warning (str)``.
"""
Sequence = namedtuple('Sequence', ['seq', 'warning'])
if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise WrongParameterFormat("`codon_positions` argument should be any of the following"
": 1st, 2nd, 3rd, 1st-2nd or ALL")
if aminoacids:
aa = seq_record.translate()
if '*' in aa:
warning = "Gene {0}, sequence {1} contains stop codons '*'".format(seq_record.gene_code,
seq_record.voucher_code)
else:
warning = None
return Sequence(seq=aa, warning=warning)
if degenerate:
return Sequence(seq=seq_record.degenerate(degenerate), warning=None)
if codon_positions == '1st':
return Sequence(seq=seq_record.first_codon_position(), warning=None)
elif codon_positions == '2nd':
return Sequence(seq=seq_record.second_codon_position(), warning=None)
elif codon_positions == '3rd':
return Sequence(seq=seq_record.third_codon_position(), warning=None)
elif codon_positions == '1st-2nd':
return Sequence(seq=seq_record.first_and_second_codon_positions(), warning=None)
else: # None and ALL
return Sequence(seq=str(seq_record.seq), warning=None)
def make_random_filename():
return '{0}.txt'.format(uuid.uuid4().hex)
def read_and_delete_tmp_file(filename):
with open(filename, "r") as handle:
contents = handle.read()
if os.path.isfile(filename):
os.remove(filename)
return contents
def make_dataset_header(data, file_format, aminoacids):
"""Creates the dataset header for NEXUS files from ``#NEXUS`` to ``MATRIX``.
Parameters:
data (namedtuple): with necessary info for dataset creation.
file_format (str): TNT, PHYLIP, NEXUS, FASTA
aminoacids (boolean): If ``aminoacids is True`` the header will show
``DATATYPE=PROTEIN`` otherwise it will be ``DNA``.
"""
if aminoacids:
datatype = 'PROTEIN'
else:
datatype = 'DNA'
if file_format in ['NEXUS', 'PHYLIP', 'FASTA']:
header = """
#NEXUS
BEGIN DATA;
DIMENSIONS NTAX={0} NCHAR={1};
FORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-;
MATRIX
""".format(data.number_taxa, data.number_chars, datatype)
elif file_format == 'MEGA':
return "#MEGA\n!TITLE title;"
else: # file_format: TNT
if aminoacids:
molecule_type = "prot"
else:
molecule_type = "dna"
header = """
nstates {0};
xread
{1} {2}""".format(molecule_type, data.number_chars, data.number_taxa)
return header.strip()
|
carlosp420/dataset-creator
|
dataset_creator/utils.py
|
make_dataset_header
|
python
|
def make_dataset_header(data, file_format, aminoacids):
    """Create the dataset header for the requested file format.

    For NEXUS-like formats this is everything from ``#NEXUS`` to ``MATRIX``.

    Parameters:
        data (namedtuple): with necessary info for dataset creation
            (``number_taxa`` and ``number_chars`` are read here).
        file_format (str): TNT, PHYLIP, NEXUS, FASTA or MEGA.
        aminoacids (boolean): If ``aminoacids is True`` the header will show
            ``DATATYPE=PROTEIN`` otherwise it will be ``DNA``.
    """
    datatype = 'PROTEIN' if aminoacids else 'DNA'

    if file_format == 'MEGA':
        return "#MEGA\n!TITLE title;"

    if file_format in ['NEXUS', 'PHYLIP', 'FASTA']:
        header = """
#NEXUS
BEGIN DATA;
DIMENSIONS NTAX={0} NCHAR={1};
FORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-;
MATRIX
""".format(data.number_taxa, data.number_chars, datatype)
    else:  # file_format: TNT
        molecule_type = "prot" if aminoacids else "dna"
        header = """
nstates {0};
xread
{1} {2}""".format(molecule_type, data.number_chars, data.number_taxa)
    return header.strip()
|
Creates the dataset header for NEXUS files from ``#NEXUS`` to ``MATRIX``.
Parameters:
data (namedtuple): with necessary info for dataset creation.
file_format (str): TNT, PHYLIP, NEXUS, FASTA
aminoacids (boolean): If ``aminoacids is True`` the header will show
``DATATYPE=PROTEIN`` otherwise it will be ``DNA``.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/utils.py#L89-L126
| null |
# -*- coding: UTF-8 -*-
import six
if six.PY2:
from StringIO import StringIO
else:
from io import StringIO
import os
from collections import namedtuple
import uuid
from Bio import AlignIO
from .exceptions import WrongParameterFormat
def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None):
"""
Checks parameters such as codon_positions, aminoacids... to return the
required sequence as string.
Parameters:
seq_record (SeqRecordExpanded object):
codon_positions (str):
aminoacids (boolean):
Returns:
Namedtuple containing ``seq (str)`` and ``warning (str)``.
"""
Sequence = namedtuple('Sequence', ['seq', 'warning'])
if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise WrongParameterFormat("`codon_positions` argument should be any of the following"
": 1st, 2nd, 3rd, 1st-2nd or ALL")
if aminoacids:
aa = seq_record.translate()
if '*' in aa:
warning = "Gene {0}, sequence {1} contains stop codons '*'".format(seq_record.gene_code,
seq_record.voucher_code)
else:
warning = None
return Sequence(seq=aa, warning=warning)
if degenerate:
return Sequence(seq=seq_record.degenerate(degenerate), warning=None)
if codon_positions == '1st':
return Sequence(seq=seq_record.first_codon_position(), warning=None)
elif codon_positions == '2nd':
return Sequence(seq=seq_record.second_codon_position(), warning=None)
elif codon_positions == '3rd':
return Sequence(seq=seq_record.third_codon_position(), warning=None)
elif codon_positions == '1st-2nd':
return Sequence(seq=seq_record.first_and_second_codon_positions(), warning=None)
else: # None and ALL
return Sequence(seq=str(seq_record.seq), warning=None)
def convert_nexus_to_format(dataset_as_nexus, dataset_format):
"""
Converts nexus format to Phylip and Fasta using Biopython tools.
:param dataset_as_nexus:
:param dataset_format:
:return:
"""
fake_handle = StringIO(dataset_as_nexus)
nexus_al = AlignIO.parse(fake_handle, 'nexus')
tmp_file = make_random_filename()
AlignIO.write(nexus_al, tmp_file, dataset_format)
dataset_as_fasta = read_and_delete_tmp_file(tmp_file)
return dataset_as_fasta
def make_random_filename():
return '{0}.txt'.format(uuid.uuid4().hex)
def read_and_delete_tmp_file(filename):
with open(filename, "r") as handle:
contents = handle.read()
if os.path.isfile(filename):
os.remove(filename)
return contents
|
carlosp420/dataset-creator
|
dataset_creator/tnt.py
|
TntDatasetBlock.convert_to_string
|
python
|
def convert_to_string(self, block):
    """Render one gene block of SeqRecordExpanded objects as a TNT block.

    The block is prefixed once with a ``&[dna]`` / ``&[protein]`` marker,
    then one line per record: a 55-column padded taxon id followed by the
    sequence. Warnings from ``get_seq`` are collected on ``self.warnings``.

    :param block: list of SeqRecordExpanded objects for a single gene_code.
    :return: str, or ``None`` when *block* is empty (unchanged behavior).
    """
    if self.aminoacids:
        molecule_type = "protein"
    else:
        molecule_type = "dna"
    out = None
    for seq_record in block:
        if not out:
            # emit the molecule-type marker once, before the first record.
            # Bug fix: the format string has a single placeholder, so the
            # second argument (seq_record.gene_code) previously passed to
            # .format() was silently ignored — removed.
            out = '&[{0}]\n'.format(molecule_type)
        taxon_id = '{0}_{1}_{2}'.format(seq_record.voucher_code,
                                        seq_record.taxonomy['genus'],
                                        seq_record.taxonomy['species'])
        sequence = get_seq(seq_record, self.codon_positions, self.aminoacids,
                           self.degenerate)
        if sequence.warning:
            self.warnings.append(sequence.warning)
        out += '{0}{1}\n'.format(taxon_id.ljust(55), sequence.seq)
    return out
|
Takes a list of SeqRecordExpanded objects corresponding to a gene_code
and produces the gene_block as string.
:param block:
:return: str.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/tnt.py#L25-L53
|
[
"def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None):\n \"\"\"\n Checks parameters such as codon_positions, aminoacids... to return the\n required sequence as string.\n\n Parameters:\n seq_record (SeqRecordExpanded object):\n codon_positions (str):\n aminoacids (boolean):\n\n Returns:\n Namedtuple containing ``seq (str)`` and ``warning (str)``.\n \"\"\"\n Sequence = namedtuple('Sequence', ['seq', 'warning'])\n\n if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']:\n raise WrongParameterFormat(\"`codon_positions` argument should be any of the following\"\n \": 1st, 2nd, 3rd, 1st-2nd or ALL\")\n if aminoacids:\n aa = seq_record.translate()\n if '*' in aa:\n warning = \"Gene {0}, sequence {1} contains stop codons '*'\".format(seq_record.gene_code,\n seq_record.voucher_code)\n else:\n warning = None\n return Sequence(seq=aa, warning=warning)\n\n if degenerate:\n return Sequence(seq=seq_record.degenerate(degenerate), warning=None)\n\n if codon_positions == '1st':\n return Sequence(seq=seq_record.first_codon_position(), warning=None)\n elif codon_positions == '2nd':\n return Sequence(seq=seq_record.second_codon_position(), warning=None)\n elif codon_positions == '3rd':\n return Sequence(seq=seq_record.third_codon_position(), warning=None)\n elif codon_positions == '1st-2nd':\n return Sequence(seq=seq_record.first_and_second_codon_positions(), warning=None)\n else: # None and ALL\n return Sequence(seq=str(seq_record.seq), warning=None)\n"
] |
class TntDatasetBlock(DatasetBlock):
def dataset_block(self):
self.split_data()
out = []
for block in self._blocks:
if self.outgroup is not None:
block = self.put_outgroup_at_start_of_block(block)
out.append(self.convert_to_string(block))
return '\n'.join(out).strip() + '\n;\nproc/;'
def put_outgroup_at_start_of_block(self, block):
other_sequences = []
for seq_record in block:
if seq_record.voucher_code == self.outgroup:
outgroup_sequence = seq_record
else:
other_sequences.append(seq_record)
return [outgroup_sequence] + other_sequences
|
carlosp420/dataset-creator
|
dataset_creator/dataset.py
|
Dataset.sort_seq_records
|
python
|
def sort_seq_records(self, seq_records):
    """Normalize voucher codes and sort records by gene_code, then voucher.

    Dashes in voucher codes are converted to underscores (mutating the
    records in place) so the taxon names are accepted by Biopython when
    doing format conversions. Sorting is case-insensitive on both keys,
    matching the previous nested-loop implementation, but runs as a single
    stable O(n log n) sort instead of O(n * genes * vouchers), and is
    deterministic for codes that differ only in case.

    :param seq_records: list of SeqRecordExpanded objects.
    :return: new list with the records in dataset order.
    """
    for seq_record in seq_records:
        seq_record.voucher_code = seq_record.voucher_code.replace("-", "_")
    return sorted(
        seq_records,
        key=lambda rec: (rec.gene_code.lower(), rec.voucher_code.lower()),
    )
|
Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code.
The dashes in taxon names need to be converted to underscores so the
dataset will be accepted by Biopython to do format conversions.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L81-L109
| null |
class Dataset(object):
"""User's class for making datasets of several formats. It needs as input
a list of SeqRecord-expanded objects with as much info as possible:
Parameters:
seq_records (list): SeqRecordExpanded objects. The list should be
sorted by gene_code and then voucher code.
format (str): NEXUS, PHYLIP, TNT, MEGA, GenBankFASTA.
partitioning (str): Partitioning scheme: ``by gene`` (default),
``by codon position`` (each) and ``1st-2nd, 3rd``.
codon_positions (str): Can be ``1st``, ``2nd``, ``3rd``, ``1st-2nd``,
``ALL`` (default).
aminoacids (boolean): Returns the dataset as aminoacid sequences.
degenerate (str): Method to degenerate nucleotide sequences,
following Zwick et al. Can be ``S``, ``Z``,
``SZ`` and ``normal``.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Attributes:
_gene_codes_and_lengths (dict): in the form ``gene_code: list``
The list contains sequence lengths for its
sequences. We assume the longest to be the
real gene_code sequence length.
Example:
>>> dataset = Dataset(seq_records, format='NEXUS', codon_positions='1st',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'#NEXUS
blah blah
'
>>> dataset = Dataset(seq_records, format='PHYLIP', codon_positions='ALL',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'100 10
blah blah
'
"""
def __init__(self, seq_records, format=None, partitioning=None,
codon_positions=None, aminoacids=None, degenerate=None,
outgroup=None):
self.warnings = []
self.seq_records = self.sort_seq_records(seq_records)
self.gene_codes = None
self.number_taxa = None
self.number_chars = None
self.reading_frames = {}
self.format = format
self.partitioning = partitioning
self.codon_positions = codon_positions
self.aminoacids = aminoacids
self.degenerate = degenerate
self.outgroup = None
self._validate_codon_positions(codon_positions)
self._validate_partitioning(partitioning)
self._validate_outgroup(outgroup)
self.data = None
self._gene_codes_and_lengths = OrderedDict()
self._prepare_data()
self.extra_dataset_str = None
self.dataset_str = self._create_dataset()
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
elif partitioning in ['by codon position', '1st-2nd, 3rd'] \
and self.degenerate:
raise ValueError("Cannot degenerate if partitions scheme is {0!r}".format(
partitioning))
elif partitioning in ['by codon position', '1st-2nd, 3rd'] and self.format == 'MEGA':
raise ValueError("Cannot produce MEGA dataset with codon positions in different partitions")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def _validate_outgroup(self, outgroup):
"""All voucher codes in our datasets have dashes converted to underscores."""
if outgroup:
outgroup = outgroup.replace("-", "_")
good_outgroup = False
for seq_record in self.seq_records:
if seq_record.voucher_code == outgroup:
good_outgroup = True
break
if good_outgroup:
self.outgroup = outgroup
else:
raise ValueError("The given outgroup {0!r} cannot be found in the "
"input sequence records.".format(outgroup))
else:
self.outgroup = None
def _prepare_data(self):
"""
Creates named tuple with info needed to create a dataset.
:return: named tuple
"""
self._extract_genes()
self._extract_total_number_of_chars()
self._extract_number_of_taxa()
self._extract_reading_frames()
Data = namedtuple('Data', ['gene_codes', 'number_taxa', 'number_chars',
'seq_records', 'gene_codes_and_lengths',
'reading_frames'])
self.data = Data(self.gene_codes, self.number_taxa, self.number_chars,
self.seq_records, self._gene_codes_and_lengths,
self.reading_frames)
def _extract_genes(self):
gene_codes = [i.gene_code for i in self.seq_records]
unique_gene_codes = list(set(gene_codes))
# this is better: unique_gene_codes.sort(key=str.lower)
# but will not work in python2
unique_gene_codes.sort(key=lambda x: x.lower())
self.gene_codes = unique_gene_codes
def _extract_total_number_of_chars(self):
"""
sets `self.number_chars` to the number of characters as string.
"""
self._get_gene_codes_and_seq_lengths()
sum = 0
for seq_length in self._gene_codes_and_lengths.values():
sum += sorted(seq_length, reverse=True)[0]
self.number_chars = str(sum)
def _get_gene_codes_and_seq_lengths(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self._gene_codes_and_lengths:
self._gene_codes_and_lengths[seq_record.gene_code] = []
if self.aminoacids:
seq = seq_record.translate()
elif not self.aminoacids and self.degenerate is not None:
seq = seq_record.degenerate(method=self.degenerate)
else:
sequence = get_seq(seq_record, self.codon_positions)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
self._gene_codes_and_lengths[seq_record.gene_code].append(len(seq))
def _extract_number_of_taxa(self):
"""
sets `self.number_taxa` to the number of taxa as string
"""
n_taxa = dict()
for i in self.seq_records:
if i.gene_code not in n_taxa:
n_taxa[i.gene_code] = 0
n_taxa[i.gene_code] += 1
number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
self.number_taxa = str(number_taxa)
def _extract_reading_frames(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self.reading_frames:
self.reading_frames[seq_record.gene_code] = seq_record.reading_frame
def _create_dataset(self):
creator = Creator(self.data, format=self.format,
codon_positions=self.codon_positions,
partitioning=self.partitioning,
aminoacids=self.aminoacids,
degenerate=self.degenerate,
outgroup=self.outgroup,
)
self.warnings = creator.warnings
self.extra_dataset_str = creator.extra_dataset_str
dataset_str = creator.dataset_str
return dataset_str
|
carlosp420/dataset-creator
|
dataset_creator/dataset.py
|
Dataset._validate_outgroup
|
python
|
def _validate_outgroup(self, outgroup):
if outgroup:
outgroup = outgroup.replace("-", "_")
good_outgroup = False
for seq_record in self.seq_records:
if seq_record.voucher_code == outgroup:
good_outgroup = True
break
if good_outgroup:
self.outgroup = outgroup
else:
raise ValueError("The given outgroup {0!r} cannot be found in the "
"input sequence records.".format(outgroup))
else:
self.outgroup = None
|
All voucher codes in our datasets have dashes converted to underscores.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L131-L146
| null |
class Dataset(object):
"""User's class for making datasets of several formats. It needs as input
a list of SeqRecord-expanded objects with as much info as possible:
Parameters:
seq_records (list): SeqRecordExpanded objects. The list should be
sorted by gene_code and then voucher code.
format (str): NEXUS, PHYLIP, TNT, MEGA, GenBankFASTA.
partitioning (str): Partitioning scheme: ``by gene`` (default),
``by codon position`` (each) and ``1st-2nd, 3rd``.
codon_positions (str): Can be ``1st``, ``2nd``, ``3rd``, ``1st-2nd``,
``ALL`` (default).
aminoacids (boolean): Returns the dataset as aminoacid sequences.
degenerate (str): Method to degenerate nucleotide sequences,
following Zwick et al. Can be ``S``, ``Z``,
``SZ`` and ``normal``.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Attributes:
_gene_codes_and_lengths (dict): in the form ``gene_code: list``
The list contains sequence lengths for its
sequences. We assume the longest to be the
real gene_code sequence length.
Example:
>>> dataset = Dataset(seq_records, format='NEXUS', codon_positions='1st',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'#NEXUS
blah blah
'
>>> dataset = Dataset(seq_records, format='PHYLIP', codon_positions='ALL',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'100 10
blah blah
'
"""
def __init__(self, seq_records, format=None, partitioning=None,
codon_positions=None, aminoacids=None, degenerate=None,
outgroup=None):
self.warnings = []
self.seq_records = self.sort_seq_records(seq_records)
self.gene_codes = None
self.number_taxa = None
self.number_chars = None
self.reading_frames = {}
self.format = format
self.partitioning = partitioning
self.codon_positions = codon_positions
self.aminoacids = aminoacids
self.degenerate = degenerate
self.outgroup = None
self._validate_codon_positions(codon_positions)
self._validate_partitioning(partitioning)
self._validate_outgroup(outgroup)
self.data = None
self._gene_codes_and_lengths = OrderedDict()
self._prepare_data()
self.extra_dataset_str = None
self.dataset_str = self._create_dataset()
def sort_seq_records(self, seq_records):
"""Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code.
The dashes in taxon names need to be converted to underscores so the
dataset will be accepted by Biopython to do format conversions.
"""
for seq_record in seq_records:
seq_record.voucher_code = seq_record.voucher_code.replace("-", "_")
unsorted_gene_codes = set([i.gene_code for i in seq_records])
sorted_gene_codes = list(unsorted_gene_codes)
sorted_gene_codes.sort(key=lambda x: x.lower())
unsorted_voucher_codes = set([i.voucher_code for i in seq_records])
sorted_voucher_codes = list(unsorted_voucher_codes)
sorted_voucher_codes.sort(key=lambda x: x.lower())
sorted_seq_records = []
for gene_code in sorted_gene_codes:
for voucher_code in sorted_voucher_codes:
for seq_record in seq_records:
should_be_done = (
seq_record.gene_code == gene_code and
seq_record.voucher_code == voucher_code
)
if should_be_done:
sorted_seq_records.append(seq_record)
return sorted_seq_records
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
elif partitioning in ['by codon position', '1st-2nd, 3rd'] \
and self.degenerate:
raise ValueError("Cannot degenerate if partitions scheme is {0!r}".format(
partitioning))
elif partitioning in ['by codon position', '1st-2nd, 3rd'] and self.format == 'MEGA':
raise ValueError("Cannot produce MEGA dataset with codon positions in different partitions")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def _prepare_data(self):
"""
Creates named tuple with info needed to create a dataset.
:return: named tuple
"""
self._extract_genes()
self._extract_total_number_of_chars()
self._extract_number_of_taxa()
self._extract_reading_frames()
Data = namedtuple('Data', ['gene_codes', 'number_taxa', 'number_chars',
'seq_records', 'gene_codes_and_lengths',
'reading_frames'])
self.data = Data(self.gene_codes, self.number_taxa, self.number_chars,
self.seq_records, self._gene_codes_and_lengths,
self.reading_frames)
def _extract_genes(self):
gene_codes = [i.gene_code for i in self.seq_records]
unique_gene_codes = list(set(gene_codes))
# this is better: unique_gene_codes.sort(key=str.lower)
# but will not work in python2
unique_gene_codes.sort(key=lambda x: x.lower())
self.gene_codes = unique_gene_codes
def _extract_total_number_of_chars(self):
"""
sets `self.number_chars` to the number of characters as string.
"""
self._get_gene_codes_and_seq_lengths()
sum = 0
for seq_length in self._gene_codes_and_lengths.values():
sum += sorted(seq_length, reverse=True)[0]
self.number_chars = str(sum)
def _get_gene_codes_and_seq_lengths(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self._gene_codes_and_lengths:
self._gene_codes_and_lengths[seq_record.gene_code] = []
if self.aminoacids:
seq = seq_record.translate()
elif not self.aminoacids and self.degenerate is not None:
seq = seq_record.degenerate(method=self.degenerate)
else:
sequence = get_seq(seq_record, self.codon_positions)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
self._gene_codes_and_lengths[seq_record.gene_code].append(len(seq))
def _extract_number_of_taxa(self):
"""
sets `self.number_taxa` to the number of taxa as string
"""
n_taxa = dict()
for i in self.seq_records:
if i.gene_code not in n_taxa:
n_taxa[i.gene_code] = 0
n_taxa[i.gene_code] += 1
number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
self.number_taxa = str(number_taxa)
def _extract_reading_frames(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self.reading_frames:
self.reading_frames[seq_record.gene_code] = seq_record.reading_frame
def _create_dataset(self):
creator = Creator(self.data, format=self.format,
codon_positions=self.codon_positions,
partitioning=self.partitioning,
aminoacids=self.aminoacids,
degenerate=self.degenerate,
outgroup=self.outgroup,
)
self.warnings = creator.warnings
self.extra_dataset_str = creator.extra_dataset_str
dataset_str = creator.dataset_str
return dataset_str
|
carlosp420/dataset-creator
|
dataset_creator/dataset.py
|
Dataset._prepare_data
|
python
|
def _prepare_data(self):
self._extract_genes()
self._extract_total_number_of_chars()
self._extract_number_of_taxa()
self._extract_reading_frames()
Data = namedtuple('Data', ['gene_codes', 'number_taxa', 'number_chars',
'seq_records', 'gene_codes_and_lengths',
'reading_frames'])
self.data = Data(self.gene_codes, self.number_taxa, self.number_chars,
self.seq_records, self._gene_codes_and_lengths,
self.reading_frames)
|
Creates named tuple with info needed to create a dataset.
:return: named tuple
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L148-L164
|
[
"def _extract_genes(self):\n gene_codes = [i.gene_code for i in self.seq_records]\n unique_gene_codes = list(set(gene_codes))\n # this is better: unique_gene_codes.sort(key=str.lower)\n # but will not work in python2\n unique_gene_codes.sort(key=lambda x: x.lower())\n self.gene_codes = unique_gene_codes\n",
"def _extract_total_number_of_chars(self):\n \"\"\"\n sets `self.number_chars` to the number of characters as string.\n \"\"\"\n self._get_gene_codes_and_seq_lengths()\n\n sum = 0\n for seq_length in self._gene_codes_and_lengths.values():\n sum += sorted(seq_length, reverse=True)[0]\n self.number_chars = str(sum)\n",
"def _extract_number_of_taxa(self):\n \"\"\"\n sets `self.number_taxa` to the number of taxa as string\n \"\"\"\n n_taxa = dict()\n for i in self.seq_records:\n if i.gene_code not in n_taxa:\n n_taxa[i.gene_code] = 0\n n_taxa[i.gene_code] += 1\n number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]\n self.number_taxa = str(number_taxa)\n",
"def _extract_reading_frames(self):\n for seq_record in self.seq_records:\n if seq_record.gene_code not in self.reading_frames:\n self.reading_frames[seq_record.gene_code] = seq_record.reading_frame\n"
] |
class Dataset(object):
"""User's class for making datasets of several formats. It needs as input
a list of SeqRecord-expanded objects with as much info as possible:
Parameters:
seq_records (list): SeqRecordExpanded objects. The list should be
sorted by gene_code and then voucher code.
format (str): NEXUS, PHYLIP, TNT, MEGA, GenBankFASTA.
partitioning (str): Partitioning scheme: ``by gene`` (default),
``by codon position`` (each) and ``1st-2nd, 3rd``.
codon_positions (str): Can be ``1st``, ``2nd``, ``3rd``, ``1st-2nd``,
``ALL`` (default).
aminoacids (boolean): Returns the dataset as aminoacid sequences.
degenerate (str): Method to degenerate nucleotide sequences,
following Zwick et al. Can be ``S``, ``Z``,
``SZ`` and ``normal``.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Attributes:
_gene_codes_and_lengths (dict): in the form ``gene_code: list``
The list contains sequence lengths for its
sequences. We assume the longest to be the
real gene_code sequence length.
Example:
>>> dataset = Dataset(seq_records, format='NEXUS', codon_positions='1st',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'#NEXUS
blah blah
'
>>> dataset = Dataset(seq_records, format='PHYLIP', codon_positions='ALL',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'100 10
blah blah
'
"""
def __init__(self, seq_records, format=None, partitioning=None,
codon_positions=None, aminoacids=None, degenerate=None,
outgroup=None):
self.warnings = []
self.seq_records = self.sort_seq_records(seq_records)
self.gene_codes = None
self.number_taxa = None
self.number_chars = None
self.reading_frames = {}
self.format = format
self.partitioning = partitioning
self.codon_positions = codon_positions
self.aminoacids = aminoacids
self.degenerate = degenerate
self.outgroup = None
self._validate_codon_positions(codon_positions)
self._validate_partitioning(partitioning)
self._validate_outgroup(outgroup)
self.data = None
self._gene_codes_and_lengths = OrderedDict()
self._prepare_data()
self.extra_dataset_str = None
self.dataset_str = self._create_dataset()
def sort_seq_records(self, seq_records):
"""Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code.
The dashes in taxon names need to be converted to underscores so the
dataset will be accepted by Biopython to do format conversions.
"""
for seq_record in seq_records:
seq_record.voucher_code = seq_record.voucher_code.replace("-", "_")
unsorted_gene_codes = set([i.gene_code for i in seq_records])
sorted_gene_codes = list(unsorted_gene_codes)
sorted_gene_codes.sort(key=lambda x: x.lower())
unsorted_voucher_codes = set([i.voucher_code for i in seq_records])
sorted_voucher_codes = list(unsorted_voucher_codes)
sorted_voucher_codes.sort(key=lambda x: x.lower())
sorted_seq_records = []
for gene_code in sorted_gene_codes:
for voucher_code in sorted_voucher_codes:
for seq_record in seq_records:
should_be_done = (
seq_record.gene_code == gene_code and
seq_record.voucher_code == voucher_code
)
if should_be_done:
sorted_seq_records.append(seq_record)
return sorted_seq_records
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
elif partitioning in ['by codon position', '1st-2nd, 3rd'] \
and self.degenerate:
raise ValueError("Cannot degenerate if partitions scheme is {0!r}".format(
partitioning))
elif partitioning in ['by codon position', '1st-2nd, 3rd'] and self.format == 'MEGA':
raise ValueError("Cannot produce MEGA dataset with codon positions in different partitions")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def _validate_outgroup(self, outgroup):
"""All voucher codes in our datasets have dashes converted to underscores."""
if outgroup:
outgroup = outgroup.replace("-", "_")
good_outgroup = False
for seq_record in self.seq_records:
if seq_record.voucher_code == outgroup:
good_outgroup = True
break
if good_outgroup:
self.outgroup = outgroup
else:
raise ValueError("The given outgroup {0!r} cannot be found in the "
"input sequence records.".format(outgroup))
else:
self.outgroup = None
def _extract_genes(self):
gene_codes = [i.gene_code for i in self.seq_records]
unique_gene_codes = list(set(gene_codes))
# this is better: unique_gene_codes.sort(key=str.lower)
# but will not work in python2
unique_gene_codes.sort(key=lambda x: x.lower())
self.gene_codes = unique_gene_codes
def _extract_total_number_of_chars(self):
"""
sets `self.number_chars` to the number of characters as string.
"""
self._get_gene_codes_and_seq_lengths()
sum = 0
for seq_length in self._gene_codes_and_lengths.values():
sum += sorted(seq_length, reverse=True)[0]
self.number_chars = str(sum)
def _get_gene_codes_and_seq_lengths(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self._gene_codes_and_lengths:
self._gene_codes_and_lengths[seq_record.gene_code] = []
if self.aminoacids:
seq = seq_record.translate()
elif not self.aminoacids and self.degenerate is not None:
seq = seq_record.degenerate(method=self.degenerate)
else:
sequence = get_seq(seq_record, self.codon_positions)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
self._gene_codes_and_lengths[seq_record.gene_code].append(len(seq))
def _extract_number_of_taxa(self):
"""
sets `self.number_taxa` to the number of taxa as string
"""
n_taxa = dict()
for i in self.seq_records:
if i.gene_code not in n_taxa:
n_taxa[i.gene_code] = 0
n_taxa[i.gene_code] += 1
number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
self.number_taxa = str(number_taxa)
def _extract_reading_frames(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self.reading_frames:
self.reading_frames[seq_record.gene_code] = seq_record.reading_frame
def _create_dataset(self):
creator = Creator(self.data, format=self.format,
codon_positions=self.codon_positions,
partitioning=self.partitioning,
aminoacids=self.aminoacids,
degenerate=self.degenerate,
outgroup=self.outgroup,
)
self.warnings = creator.warnings
self.extra_dataset_str = creator.extra_dataset_str
dataset_str = creator.dataset_str
return dataset_str
|
carlosp420/dataset-creator
|
dataset_creator/dataset.py
|
Dataset._extract_total_number_of_chars
|
python
|
def _extract_total_number_of_chars(self):
self._get_gene_codes_and_seq_lengths()
sum = 0
for seq_length in self._gene_codes_and_lengths.values():
sum += sorted(seq_length, reverse=True)[0]
self.number_chars = str(sum)
|
sets `self.number_chars` to the number of characters as string.
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L174-L183
| null |
class Dataset(object):
"""User's class for making datasets of several formats. It needs as input
a list of SeqRecord-expanded objects with as much info as possible:
Parameters:
seq_records (list): SeqRecordExpanded objects. The list should be
sorted by gene_code and then voucher code.
format (str): NEXUS, PHYLIP, TNT, MEGA, GenBankFASTA.
partitioning (str): Partitioning scheme: ``by gene`` (default),
``by codon position`` (each) and ``1st-2nd, 3rd``.
codon_positions (str): Can be ``1st``, ``2nd``, ``3rd``, ``1st-2nd``,
``ALL`` (default).
aminoacids (boolean): Returns the dataset as aminoacid sequences.
degenerate (str): Method to degenerate nucleotide sequences,
following Zwick et al. Can be ``S``, ``Z``,
``SZ`` and ``normal``.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Attributes:
_gene_codes_and_lengths (dict): in the form ``gene_code: list``
The list contains sequence lengths for its
sequences. We assume the longest to be the
real gene_code sequence length.
Example:
>>> dataset = Dataset(seq_records, format='NEXUS', codon_positions='1st',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'#NEXUS
blah blah
'
>>> dataset = Dataset(seq_records, format='PHYLIP', codon_positions='ALL',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'100 10
blah blah
'
"""
def __init__(self, seq_records, format=None, partitioning=None,
codon_positions=None, aminoacids=None, degenerate=None,
outgroup=None):
self.warnings = []
self.seq_records = self.sort_seq_records(seq_records)
self.gene_codes = None
self.number_taxa = None
self.number_chars = None
self.reading_frames = {}
self.format = format
self.partitioning = partitioning
self.codon_positions = codon_positions
self.aminoacids = aminoacids
self.degenerate = degenerate
self.outgroup = None
self._validate_codon_positions(codon_positions)
self._validate_partitioning(partitioning)
self._validate_outgroup(outgroup)
self.data = None
self._gene_codes_and_lengths = OrderedDict()
self._prepare_data()
self.extra_dataset_str = None
self.dataset_str = self._create_dataset()
def sort_seq_records(self, seq_records):
"""Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code.
The dashes in taxon names need to be converted to underscores so the
dataset will be accepted by Biopython to do format conversions.
"""
for seq_record in seq_records:
seq_record.voucher_code = seq_record.voucher_code.replace("-", "_")
unsorted_gene_codes = set([i.gene_code for i in seq_records])
sorted_gene_codes = list(unsorted_gene_codes)
sorted_gene_codes.sort(key=lambda x: x.lower())
unsorted_voucher_codes = set([i.voucher_code for i in seq_records])
sorted_voucher_codes = list(unsorted_voucher_codes)
sorted_voucher_codes.sort(key=lambda x: x.lower())
sorted_seq_records = []
for gene_code in sorted_gene_codes:
for voucher_code in sorted_voucher_codes:
for seq_record in seq_records:
should_be_done = (
seq_record.gene_code == gene_code and
seq_record.voucher_code == voucher_code
)
if should_be_done:
sorted_seq_records.append(seq_record)
return sorted_seq_records
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
elif partitioning in ['by codon position', '1st-2nd, 3rd'] \
and self.degenerate:
raise ValueError("Cannot degenerate if partitions scheme is {0!r}".format(
partitioning))
elif partitioning in ['by codon position', '1st-2nd, 3rd'] and self.format == 'MEGA':
raise ValueError("Cannot produce MEGA dataset with codon positions in different partitions")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def _validate_outgroup(self, outgroup):
"""All voucher codes in our datasets have dashes converted to underscores."""
if outgroup:
outgroup = outgroup.replace("-", "_")
good_outgroup = False
for seq_record in self.seq_records:
if seq_record.voucher_code == outgroup:
good_outgroup = True
break
if good_outgroup:
self.outgroup = outgroup
else:
raise ValueError("The given outgroup {0!r} cannot be found in the "
"input sequence records.".format(outgroup))
else:
self.outgroup = None
def _prepare_data(self):
"""
Creates named tuple with info needed to create a dataset.
:return: named tuple
"""
self._extract_genes()
self._extract_total_number_of_chars()
self._extract_number_of_taxa()
self._extract_reading_frames()
Data = namedtuple('Data', ['gene_codes', 'number_taxa', 'number_chars',
'seq_records', 'gene_codes_and_lengths',
'reading_frames'])
self.data = Data(self.gene_codes, self.number_taxa, self.number_chars,
self.seq_records, self._gene_codes_and_lengths,
self.reading_frames)
def _extract_genes(self):
gene_codes = [i.gene_code for i in self.seq_records]
unique_gene_codes = list(set(gene_codes))
# this is better: unique_gene_codes.sort(key=str.lower)
# but will not work in python2
unique_gene_codes.sort(key=lambda x: x.lower())
self.gene_codes = unique_gene_codes
def _get_gene_codes_and_seq_lengths(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self._gene_codes_and_lengths:
self._gene_codes_and_lengths[seq_record.gene_code] = []
if self.aminoacids:
seq = seq_record.translate()
elif not self.aminoacids and self.degenerate is not None:
seq = seq_record.degenerate(method=self.degenerate)
else:
sequence = get_seq(seq_record, self.codon_positions)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
self._gene_codes_and_lengths[seq_record.gene_code].append(len(seq))
def _extract_number_of_taxa(self):
"""
sets `self.number_taxa` to the number of taxa as string
"""
n_taxa = dict()
for i in self.seq_records:
if i.gene_code not in n_taxa:
n_taxa[i.gene_code] = 0
n_taxa[i.gene_code] += 1
number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
self.number_taxa = str(number_taxa)
def _extract_reading_frames(self):
for seq_record in self.seq_records:
if seq_record.gene_code not in self.reading_frames:
self.reading_frames[seq_record.gene_code] = seq_record.reading_frame
def _create_dataset(self):
creator = Creator(self.data, format=self.format,
codon_positions=self.codon_positions,
partitioning=self.partitioning,
aminoacids=self.aminoacids,
degenerate=self.degenerate,
outgroup=self.outgroup,
)
self.warnings = creator.warnings
self.extra_dataset_str = creator.extra_dataset_str
dataset_str = creator.dataset_str
return dataset_str
|
carlosp420/dataset-creator
|
dataset_creator/dataset.py
|
Dataset._extract_number_of_taxa
|
python
|
def _extract_number_of_taxa(self):
n_taxa = dict()
for i in self.seq_records:
if i.gene_code not in n_taxa:
n_taxa[i.gene_code] = 0
n_taxa[i.gene_code] += 1
number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
self.number_taxa = str(number_taxa)
|
sets `self.number_taxa` to the number of taxa as string
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L201-L211
| null |
class Dataset(object):
"""User's class for making datasets of several formats. It needs as input
a list of SeqRecord-expanded objects with as much info as possible:
Parameters:
seq_records (list): SeqRecordExpanded objects. The list should be
sorted by gene_code and then voucher code.
format (str): NEXUS, PHYLIP, TNT, MEGA, GenBankFASTA.
partitioning (str): Partitioning scheme: ``by gene`` (default),
``by codon position`` (each) and ``1st-2nd, 3rd``.
codon_positions (str): Can be ``1st``, ``2nd``, ``3rd``, ``1st-2nd``,
``ALL`` (default).
aminoacids (boolean): Returns the dataset as aminoacid sequences.
degenerate (str): Method to degenerate nucleotide sequences,
following Zwick et al. Can be ``S``, ``Z``,
``SZ`` and ``normal``.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Attributes:
_gene_codes_and_lengths (dict): in the form ``gene_code: list``
The list contains sequence lengths for its
sequences. We assume the longest to be the
real gene_code sequence length.
Example:
>>> dataset = Dataset(seq_records, format='NEXUS', codon_positions='1st',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'#NEXUS
blah blah
'
>>> dataset = Dataset(seq_records, format='PHYLIP', codon_positions='ALL',
... partitioning='by gene',
... )
>>> print(dataset.dataset_str)
'100 10
blah blah
'
"""
def __init__(self, seq_records, format=None, partitioning=None,
codon_positions=None, aminoacids=None, degenerate=None,
outgroup=None):
self.warnings = []
self.seq_records = self.sort_seq_records(seq_records)
self.gene_codes = None
self.number_taxa = None
self.number_chars = None
self.reading_frames = {}
self.format = format
self.partitioning = partitioning
self.codon_positions = codon_positions
self.aminoacids = aminoacids
self.degenerate = degenerate
self.outgroup = None
self._validate_codon_positions(codon_positions)
self._validate_partitioning(partitioning)
self._validate_outgroup(outgroup)
self.data = None
self._gene_codes_and_lengths = OrderedDict()
self._prepare_data()
self.extra_dataset_str = None
self.dataset_str = self._create_dataset()
def sort_seq_records(self, seq_records):
"""Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code.
The dashes in taxon names need to be converted to underscores so the
dataset will be accepted by Biopython to do format conversions.
"""
for seq_record in seq_records:
seq_record.voucher_code = seq_record.voucher_code.replace("-", "_")
unsorted_gene_codes = set([i.gene_code for i in seq_records])
sorted_gene_codes = list(unsorted_gene_codes)
sorted_gene_codes.sort(key=lambda x: x.lower())
unsorted_voucher_codes = set([i.voucher_code for i in seq_records])
sorted_voucher_codes = list(unsorted_voucher_codes)
sorted_voucher_codes.sort(key=lambda x: x.lower())
sorted_seq_records = []
for gene_code in sorted_gene_codes:
for voucher_code in sorted_voucher_codes:
for seq_record in seq_records:
should_be_done = (
seq_record.gene_code == gene_code and
seq_record.voucher_code == voucher_code
)
if should_be_done:
sorted_seq_records.append(seq_record)
return sorted_seq_records
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
elif partitioning in ['by codon position', '1st-2nd, 3rd'] \
and self.degenerate:
raise ValueError("Cannot degenerate if partitions scheme is {0!r}".format(
partitioning))
elif partitioning in ['by codon position', '1st-2nd, 3rd'] and self.format == 'MEGA':
raise ValueError("Cannot produce MEGA dataset with codon positions in different partitions")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def _validate_outgroup(self, outgroup):
    """Store *outgroup* on self if it matches a record's voucher code.

    All voucher codes in our datasets have dashes converted to
    underscores, so the given outgroup is normalized the same way before
    matching. Raises ValueError when no record carries that voucher code.
    """
    if not outgroup:
        self.outgroup = None
        return
    outgroup = outgroup.replace("-", "_")
    if any(record.voucher_code == outgroup for record in self.seq_records):
        self.outgroup = outgroup
    else:
        raise ValueError("The given outgroup {0!r} cannot be found in the "
                         "input sequence records.".format(outgroup))
def _prepare_data(self):
    """Build ``self.data``: a named tuple with the info needed to create a dataset."""
    # populate the attributes the named tuple is assembled from
    self._extract_genes()
    self._extract_total_number_of_chars()
    self._extract_number_of_taxa()
    self._extract_reading_frames()
    fields = ['gene_codes', 'number_taxa', 'number_chars', 'seq_records',
              'gene_codes_and_lengths', 'reading_frames']
    Data = namedtuple('Data', fields)
    self.data = Data(gene_codes=self.gene_codes,
                     number_taxa=self.number_taxa,
                     number_chars=self.number_chars,
                     seq_records=self.seq_records,
                     gene_codes_and_lengths=self._gene_codes_and_lengths,
                     reading_frames=self.reading_frames)
def _extract_genes(self):
    """Collect the unique gene codes, sorted case-insensitively, into ``self.gene_codes``."""
    unique_codes = {record.gene_code for record in self.seq_records}
    # key=lambda (not str.lower) keeps python2 compatibility, as noted upstream
    self.gene_codes = sorted(unique_codes, key=lambda code: code.lower())
def _extract_total_number_of_chars(self):
    """Set ``self.number_chars`` to the total alignment length, as a string.

    Each gene contributes the length of its longest observed sequence;
    the total is the sum of those per-gene maxima.
    """
    self._get_gene_codes_and_seq_lengths()
    # max() replaces sorted(..., reverse=True)[0] (O(n) vs O(n log n));
    # also avoid shadowing the builtin `sum` with a local name.
    total = sum(max(lengths)
                for lengths in self._gene_codes_and_lengths.values())
    self.number_chars = str(total)
def _get_gene_codes_and_seq_lengths(self):
    """Fill ``self._gene_codes_and_lengths``: gene_code -> list of sequence lengths.

    The length recorded for each record depends on the dataset options:
    translated aminoacids, degenerated nucleotides (using
    ``self.degenerate`` as the method), or the plain sequence restricted
    to ``self.codon_positions`` via ``get_seq``. Warnings reported by
    ``get_seq`` are accumulated into ``self.warnings``.
    """
    for seq_record in self.seq_records:
        if seq_record.gene_code not in self._gene_codes_and_lengths:
            self._gene_codes_and_lengths[seq_record.gene_code] = []
        if self.aminoacids:
            seq = seq_record.translate()
        elif not self.aminoacids and self.degenerate is not None:
            seq = seq_record.degenerate(method=self.degenerate)
        else:
            # plain nucleotides, possibly restricted to some codon positions
            sequence = get_seq(seq_record, self.codon_positions)
            seq = sequence.seq
            if sequence.warning:
                self.warnings.append(sequence.warning)
        self._gene_codes_and_lengths[seq_record.gene_code].append(len(seq))
def _extract_reading_frames(self):
    """Map each gene code to the reading frame of the first record seen for it."""
    for record in self.seq_records:
        # setdefault keeps the first record's frame, like the original
        # `if gene_code not in ...` guard
        self.reading_frames.setdefault(record.gene_code, record.reading_frame)
def _create_dataset(self):
    """Delegate dataset rendering to ``Creator`` and return the dataset string.

    Also copies the creator's accumulated warnings and any extra dataset
    text (``extra_dataset_str``) onto this object.
    """
    creator = Creator(self.data, format=self.format,
                      codon_positions=self.codon_positions,
                      partitioning=self.partitioning,
                      aminoacids=self.aminoacids,
                      degenerate=self.degenerate,
                      outgroup=self.outgroup,
                      )
    self.warnings = creator.warnings
    self.extra_dataset_str = creator.extra_dataset_str
    dataset_str = creator.dataset_str
    return dataset_str
|
carlosp420/dataset-creator
|
dataset_creator/mega.py
|
MegaDatasetBlock.convert_blocks_to_string
|
python
|
def convert_blocks_to_string(self):
    """Flatten ``self._blocks`` into the MEGA body text.

    Produces one '#taxon_id\\nsequence\\n' entry per taxon, where the taxon
    id is 'voucher_genus_species'. Sequences of the same taxon index from
    consecutive blocks are concatenated. Warnings reported by ``get_seq``
    are accumulated into ``self.warnings``.

    :return: flattened data blocks as string
    """
    number_taxa = int(self.data.number_taxa)
    # was `[[]] * number_taxa`: every slot is overwritten with a str anyway,
    # so initialize with strings and avoid the shared-list-reference trap
    taxa_ids = [''] * number_taxa
    sequences = [''] * number_taxa
    for block in self._blocks:
        for index, seq_record in enumerate(block):
            taxa_ids[index] = '{0}_{1}_{2}'.format(seq_record.voucher_code,
                                                   seq_record.taxonomy['genus'],
                                                   seq_record.taxonomy['species'],
                                                   )
            sequence = get_seq(seq_record, self.codon_positions,
                               aminoacids=self.aminoacids,
                               degenerate=self.degenerate)
            sequences[index] += sequence.seq
            if sequence.warning:
                self.warnings.append(sequence.warning)
    # join is O(n) versus repeated string concatenation
    return ''.join('#{0}\n{1}\n'.format(taxon_id, seq)
                   for taxon_id, seq in zip(taxa_ids, sequences))
|
New method, only in MegaDatasetBlock class.
:return: flattened data blocks as string
|
train
|
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/mega.py#L10-L35
|
[
"def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None):\n \"\"\"\n Checks parameters such as codon_positions, aminoacids... to return the\n required sequence as string.\n\n Parameters:\n seq_record (SeqRecordExpanded object):\n codon_positions (str):\n aminoacids (boolean):\n\n Returns:\n Namedtuple containing ``seq (str)`` and ``warning (str)``.\n \"\"\"\n Sequence = namedtuple('Sequence', ['seq', 'warning'])\n\n if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']:\n raise WrongParameterFormat(\"`codon_positions` argument should be any of the following\"\n \": 1st, 2nd, 3rd, 1st-2nd or ALL\")\n if aminoacids:\n aa = seq_record.translate()\n if '*' in aa:\n warning = \"Gene {0}, sequence {1} contains stop codons '*'\".format(seq_record.gene_code,\n seq_record.voucher_code)\n else:\n warning = None\n return Sequence(seq=aa, warning=warning)\n\n if degenerate:\n return Sequence(seq=seq_record.degenerate(degenerate), warning=None)\n\n if codon_positions == '1st':\n return Sequence(seq=seq_record.first_codon_position(), warning=None)\n elif codon_positions == '2nd':\n return Sequence(seq=seq_record.second_codon_position(), warning=None)\n elif codon_positions == '3rd':\n return Sequence(seq=seq_record.third_codon_position(), warning=None)\n elif codon_positions == '1st-2nd':\n return Sequence(seq=seq_record.first_and_second_codon_positions(), warning=None)\n else: # None and ALL\n return Sequence(seq=str(seq_record.seq), warning=None)\n"
] |
class MegaDatasetBlock(DatasetBlock):
    # Specialization of DatasetBlock for the MEGA output format.
    def dataset_block(self):
        """Split the records into blocks and render them as the MEGA data body."""
        self.split_data()
        return self.convert_blocks_to_string()
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_arguments
|
python
|
def function_arguments(func):
    """
    This returns a list of all arguments
    :param func: callable object
    :return: list of str of the arguments for the function
    """
    signature = getattr(inspect, 'signature', None)
    if signature is None:
        # python2: inspect.signature does not exist
        return list(inspect.getargspec(func).args)
    return list(signature(func).parameters.keys())
|
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L13-L22
| null |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_defaults(func):
    """
    This returns a list of the default arguments
    :param func: callable object
    :return: list of obj of default parameters
    """
    if getattr(inspect, 'signature', None) is None:
        # python2 fallback: last element of getargspec is the defaults tuple
        return inspect.getargspec(func)[-1] or []
    params = inspect.signature(func).parameters.values()
    return [param.default for param in params
            if param.default is not inspect._empty]
def function_doc(function_index=1, function_name=None):
    """
    This will return the doc of the calling function
    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the doc from the target function
    """
    frm = func_frame(function_index + 1, function_name)
    try:
        # bound method: resolve the function through the frame's `self`
        func = getattr(frm.f_locals['self'], frm.f_code.co_name)
    except:
        # plain function: look it up in the frame's module globals
        func = frm.f_globals[frm.f_code.co_name]
    return func.__doc__
def function_path(func):
    """
    This will return the path of the file where *func* was defined,
    with backslashes normalized to forward slashes.
    :param func: callable object
    :return: str path of the defining source file
    """
    # The original if/else had two byte-identical branches (the py2 branch
    # was meant to use func.func_code); __code__ works on py2 and py3 alike.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
    """
    This will return the full source text of the FILE that contains the
    calling function (not just the function body).
    function_index of 2 will give the parent of the caller
    function_name should not be used with function_index
    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the source of the target function's file
    """
    info = function_info(function_index + 1, function_name)
    with open(info['file'], 'r') as fn:
        return fn.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
                    extra_values=None):
    """
    This will return a dictionary of local variables that are parameters to the
    function provided in the arg.
    Example:
        function(**relevant_kwargs(function))
    :param function: function to select parameters for
    :param exclude_keys: str,list,func if not a function it will be converted
        into a function; defaults to excluding 'self'
    :param exclude_values: obj,list,func if not a function it will be converted
        into one; defaults to excluding None
    :param extra_values: dict of other values to include with local
    :return: dict of local variables for the function
    """
    args = function_args(function)
    # function_index=2: skip this frame so we read the CALLER's locals
    locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
    if extra_values:
        locals_values.update(extra_values)
    # keep only locals that are actually parameters of `function`
    return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
    """
    This will return the variable names of *function*'s code object
    (declared parameters first).
    :param function: function (or frame) to check arguments for
    :return: tuple of argument names
    """
    code = getattr(function, '__code__', None)
    if code is None:
        # frames expose their code object as f_code instead
        code = function.f_code
    return code.co_varnames
def function_kwargs(function_index=1, function_name=None,
                    exclude_keys='self', exclude_values=None):
    """
    This will return a dict of the keyword
    arguments of the function that calls it
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of the function name (should not
        be used with function_index)
    :param exclude_keys: str,list,func if not a function it
        will be turned into one; defaults to excluding 'self'
    :param exclude_values: obj,list,func if not a function it will be
        turned into one; defaults to excluding None
    :return: dict of arguments passed into the
        function making this call
    """
    # normalize scalar/list filters into membership predicates
    if not hasattr(exclude_values, '__call__'):
        _exclude_values = isinstance(exclude_values, list) and\
            exclude_values or [exclude_values]
        exclude_values = lambda x: x in _exclude_values
    if not hasattr(exclude_keys, '__call__'):
        _exclude_keys = isinstance(exclude_keys, list) and\
            exclude_keys or [exclude_keys]
        exclude_keys = lambda x: x in _exclude_keys
    frm = func_frame(function_index + 1, function_name)
    # co_varnames lists the declared parameters first (co_argcount of them)
    args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
    ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
                not exclude_values(frm.f_locals[k])])
    return ret
def function_code(function_index=1, function_name=None):
    """
    This will return the code of the calling function -- currently a stub.
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :raises NotImplementedError: always; this helper is not implemented yet
    """
    frm = function_info(function_index + 1, function_name)
    # was `raise NotImplemented`, which raises TypeError because
    # NotImplemented is a value, not an exception class
    raise NotImplementedError
def function_info(function_index=1, function_name=None, line_number=None):
    """
    This will return the class_name and function_name of the
    function traced back two functions.
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :param line_number: int, some times the user may want to override
        this for testing purposes
    :return dict: keys are class_name, function_name, file, path, basename,
        line_number, globals, locals, arguments, kwargs, frame
    """
    frm = func_frame(function_index + 1, function_name)
    file_ = os.path.abspath(frm.f_code.co_filename)
    class_name = frm.f_locals.get('self', None)
    if class_name is not None:  # and not skip_class:
        # "<class 'pkg.Cls'>" -> "Cls"
        class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
    # try:
    #     class_name = str(class_name).split(None, 1)[1]
    #     class_name = class_name.split('.')[-1].replace(')', '')
    # except:
    #     class_name = repr(class_name).split()[0].split('.')[-1]
    # if 'object at' in str(class_name):
    #     class_name = str(class_name).split(' object at')[0].split('.')[-1]
    args, _, _, kwargs = inspect.getargvalues(frm)
    line_number = line_number or frm.f_lineno
    return {'class_name': class_name or '',
            'function_name': frm.f_code.co_name,
            'file': file_,
            'path': os.path.split(file_)[0],
            'basename': os.path.basename(file_).split('.')[0],
            'line_number': line_number or frm.f_lineno,
            'globals': frm.f_globals,
            'locals': frm.f_locals,
            'arguments': args,
            'kwargs': kwargs,
            'frame': frm}
def function_history():
    """
    This will return a list of all function calls going back to the beginning
    :return: list of str of function name
    """
    ret = []
    frm = inspect.currentframe()
    for i in range(100):  # hard cap so a pathological stack cannot loop forever
        try:
            if frm.f_code.co_name != 'run_code':  # this is pycharm debugger
                # inserting middleware
                ret.append(frm.f_code.co_name)
            frm = frm.f_back
        except Exception as e:
            # frm is None past the outermost frame; stop walking there
            break
    return ret
def func_frame(function_index, function_name=None):
    """
    This will return the frame of the function traced back
    *function_index* frames, or the first frame whose code name starts
    with *function_name* when that is given.
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :return frame: this will return the frame of the calling function """
    frm = inspect.currentframe()
    if function_name is not None:
        function_name = function_name.split('*')[0]  # todo replace this
        # todo with regex
        for i in range(1000):
            if frm.f_code.co_name.startswith(function_name):
                break
            frm = frm.f_back
    else:
        for i in range(function_index):
            frm = frm.f_back
    try:  # this is pycharm debugger inserting middleware
        if frm.f_code.co_name == 'run_code':
            frm = frm.f_back
    except:
        pass
    return frm
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    This will return the line number of the indicated function in the stack
    :param width: int field width to left-justify to; None returns the raw int
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :return: str of the current line number padded to *width*,
        or the int line number when width is None
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # was `frm._f_lineno`, an AttributeError: the attribute is f_lineno
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
    """Return (class_name, function_name) of the frame *function_index* back."""
    info = function_info(function_index=function_index + 1)
    return info['class_name'], info['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
    """Return 'file__Class__function' (or 'file__function' outside a class)
    for the target frame, joined with *deliminator*."""
    info = function_info(function_index=function_index + 1,
                         function_name=function_name)
    base = os.path.basename(info['file']).split('.')[0]
    if info['class_name']:
        parts = (base, info['class_name'], info['function_name'])
    else:
        parts = (base, info['function_name'])
    return deliminator.join(parts)
def current_folder(function_index=1, function_name=None, deliminator='__'):
    """Return the folder (with forward slashes) that contains the caller's file."""
    info = function_info(function_index + 1, function_name)
    folder, _ = os.path.split(info['file'])
    return folder.replace('\\', '/')
def trace_error(function_index=2):
    """
    This will return the line number and line text of the last error
    :param function_index: int to tell what frame to look from
    :return: (str, str) of the line number and line text, or (None, None)
        when no matching stack entry is found
    """
    info = function_info(function_index)
    traces = traceback.format_stack(limit=10)
    for trace in traces:
        # each entry looks like: ' File "x.py", line N, in <name>\n    text'
        file_, line_number, line_text = trace.split(',', 2)
        if file_ == ' File "%s"' % info['file'] and\
                line_number != 'line %s' % info['line_number']:
            return line_number.split()[-1], line_text.strip()
    return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_defaults
|
python
|
def function_defaults(func):
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
|
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L25-L36
| null |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_doc(function_index=1, function_name=None):
"""
This will return the doc of the calling function
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the doc from the target function
"""
frm = func_frame(function_index + 1, function_name)
try:
func = getattr(frm.f_locals['self'], frm.f_code.co_name)
except:
func = frm.f_globals[frm.f_code.co_name]
return func.__doc__
def function_path(func):
    """
    This will return the path of the file where *func* was defined,
    with backslashes normalized to forward slashes.
    :param func: callable object
    :return: str path of the defining source file
    """
    # The original if/else had two byte-identical branches (the py2 branch
    # was meant to use func.func_code); __code__ works on py2 and py3 alike.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
"""
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
"""
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding 'self'
:return: dict of arguments passed into the
function making this call
"""
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
def function_code(function_index=1, function_name=None):
    """
    This will return the code of the calling function -- currently a stub.
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :raises NotImplementedError: always; this helper is not implemented yet
    """
    frm = function_info(function_index + 1, function_name)
    # was `raise NotImplemented`, which raises TypeError because
    # NotImplemented is a value, not an exception class
    raise NotImplementedError
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
"""
This will return a list of all function calls going back to the beginning
:return: list of str of function name
"""
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    This will return the line number of the indicated function in the stack
    :param width: int field width to left-justify to; None returns the raw int
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :return: str of the current line number padded to *width*,
        or the int line number when width is None
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # was `frm._f_lineno`, an AttributeError: the attribute is f_lineno
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
ret = function_info(function_index=function_index + 1)
return ret['class_name'], ret['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
def trace_error(function_index=2):
"""
This will return the line number and line text of the last error
:param function_index: int to tell what frame to look from
:return: int, str of the line number and line text
"""
info = function_info(function_index)
traces = traceback.format_stack(limit=10)
for trace in traces:
file_, line_number, line_text = trace.split(',', 2)
if file_ == ' File "%s"' % info['file'] and\
line_number != 'line %s' % info['line_number']:
return line_number.split()[-1], line_text.strip()
return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_doc
|
python
|
def function_doc(function_index=1, function_name=None):
frm = func_frame(function_index + 1, function_name)
try:
func = getattr(frm.f_locals['self'], frm.f_code.co_name)
except:
func = frm.f_globals[frm.f_code.co_name]
return func.__doc__
|
This will return the doc of the calling function
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the doc from the target function
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L39-L51
|
[
"def func_frame(function_index, function_name=None):\n \"\"\"\n This will return the class_name and function_name of the\n function traced back two functions.\n\n :param function_index: int of how many frames back the program \n should look (2 will give the parent of the caller)\n :param function_name: str of what function to look for (should \n not be used with function_index\n :return frame: this will return the frame of the calling function \"\"\"\n frm = inspect.currentframe()\n if function_name is not None:\n function_name = function_name.split('*')[0] # todo replace this\n # todo with regex\n for i in range(1000):\n if frm.f_code.co_name.startswith(function_name):\n break\n frm = frm.f_back\n else:\n for i in range(function_index):\n frm = frm.f_back\n try: # this is pycharm debugger inserting middleware\n if frm.f_code.co_name == 'run_code':\n frm = frm.f_back\n except:\n pass\n return frm\n"
] |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_defaults(func):
"""
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
"""
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
def function_path(func):
    """
    This will return the path of the file where *func* was defined,
    with backslashes normalized to forward slashes.
    :param func: callable object
    :return: str path of the defining source file
    """
    # The original if/else had two byte-identical branches (the py2 branch
    # was meant to use func.func_code); __code__ works on py2 and py3 alike.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
"""
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
"""
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding 'self'
:return: dict of arguments passed into the
function making this call
"""
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
def function_code(function_index=1, function_name=None):
    """
    This will return the code of the calling function -- currently a stub.
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :raises NotImplementedError: always; this helper is not implemented yet
    """
    frm = function_info(function_index + 1, function_name)
    # was `raise NotImplemented`, which raises TypeError because
    # NotImplemented is a value, not an exception class
    raise NotImplementedError
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
    """
    Return the names of all function calls on the current stack, innermost
    first, going back to the interpreter entry point.

    Frames named ``run_code`` (middleware inserted by the PyCharm debugger)
    are skipped.

    :return: list of str of function names
    """
    ret = []
    frm = inspect.currentframe()
    # Walk outward at most 100 frames; ``f_back`` is None at the outermost
    # frame, which previously relied on a broad ``except Exception`` to
    # stop — now checked explicitly.
    for _ in range(100):
        if frm is None:
            break
        if frm.f_code.co_name != 'run_code':
            ret.append(frm.f_code.co_name)
        frm = frm.f_back
    return ret
def func_frame(function_index, function_name=None):
    """
    Return the stack frame ``function_index`` levels above this call, or the
    nearest outer frame whose function name starts with *function_name*.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index); a trailing ``*`` wildcard is
        stripped before matching
    :return frame: the frame of the calling function
    """
    frm = inspect.currentframe()
    if function_name is not None:
        function_name = function_name.split('*')[0]  # todo replace this
        # todo with regex
        # Walk outward (up to 1000 frames) until a frame's function name
        # matches the requested prefix.
        for i in range(1000):
            if frm.f_code.co_name.startswith(function_name):
                break
            frm = frm.f_back
    else:
        # Walk outward a fixed number of frames.
        for i in range(function_index):
            frm = frm.f_back
    try:  # this is pycharm debugger inserting middleware
        # NOTE(review): ``frm`` may be None here if the walk ran past the
        # outermost frame; the bare except silently absorbs that case.
        if frm.f_code.co_name == 'run_code':
            frm = frm.f_back
    except:
        pass
    return frm
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    Return the current line number of the indicated function in the stack.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :param width: int field width of the left-justified string result,
        or None to return the raw int
    :return: str of the line number padded to *width*, or int if
        *width* is None
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # BUG FIX: was ``frm._f_lineno`` (AttributeError); frame objects
        # expose the current line number as ``f_lineno``.
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
    """Return ``(class_name, function_name)`` for the frame *function_index* levels up."""
    info = function_info(function_index=function_index + 1)
    return info['class_name'], info['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
    """
    Return a ``file__Class__function`` style identifier for the calling
    function (the class segment is omitted for plain functions).

    :param function_index: int of how many frames back to look
    :param function_name: str name of the function to look for
    :param deliminator: str joining the identifier segments
    :return: str identifier
    """
    info = function_info(function_index=function_index + 1,
                         function_name=function_name)
    module = os.path.basename(info['file']).split('.')[0]
    segments = [module]
    if info['class_name']:
        segments.append(info['class_name'])
    segments.append(info['function_name'])
    return deliminator.join(segments)
def current_folder(function_index=1, function_name=None, deliminator='__'):
    """
    Return the directory of the calling function's source file, with
    forward slashes.  (*deliminator* is accepted for signature parity with
    :func:`path` but is unused.)
    """
    info = function_info(function_index + 1, function_name)
    folder, _ = os.path.split(info['file'])
    return folder.replace('\\', '/')
def trace_error(function_index=2):
    """
    Return the line number and line text of the most recent stack entry in
    the caller's file that is not the caller's own line.

    :param function_index: int to tell what frame to look from
    :return: (str, str) of the line number and line text, or (None, None)
        if no matching traceback entry is found
    """
    info = function_info(function_index)
    # format_stack entries look like:
    #   '  File "<file>", line <n>, in <func>\n    <source text>\n'
    # so splitting on ',' (max 2 splits) yields file, line part, and text.
    traces = traceback.format_stack(limit=10)
    for trace in traces:
        file_, line_number, line_text = trace.split(',', 2)
        # Match this file but skip the entry for the current line itself.
        if file_ == ' File "%s"' % info['file'] and\
                line_number != 'line %s' % info['line_number']:
            return line_number.split()[-1], line_text.strip()
    return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_path
|
python
|
def function_path(func):
if getattr(func, 'func_code', None):
return func.__code__.co_filename.replace('\\', '/')
else:
return func.__code__.co_filename.replace('\\', '/')
|
This will return the path to the calling function
:param func:
:return:
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L54-L63
| null |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
    """
    Return the names of all parameters accepted by *func*.

    :param func: callable object
    :return: list of str of the arguments for the function
    """
    if hasattr(inspect, 'signature'):
        # Modern API: iterating the parameters mapping yields the names.
        return list(inspect.signature(func).parameters)
    # Legacy (Python 2) fallback.
    return list(inspect.getargspec(func).args)
def function_defaults(func):
    """
    Return the default values of *func*'s defaulted parameters, in
    declaration order.

    :param func: callable object
    :return: list of obj of default parameters
    """
    if hasattr(inspect, 'signature'):
        params = inspect.signature(func).parameters.values()
        return [p.default for p in params
                if p.default is not inspect.Parameter.empty]
    # Legacy (Python 2) fallback: getargspec()[-1] is the defaults tuple
    # (or None when there are no defaults).
    return inspect.getargspec(func)[-1] or []
def function_doc(function_index=1, function_name=None):
    """
    Return the docstring of the calling function.

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the doc from the target function
    """
    frm = func_frame(function_index + 1, function_name)
    try:
        # Bound method: resolve it off the frame's ``self``.
        func = getattr(frm.f_locals['self'], frm.f_code.co_name)
    except (KeyError, AttributeError):
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.  KeyError: no ``self`` in locals
        # (plain function); AttributeError: name not found on ``self``.
        func = frm.f_globals[frm.f_code.co_name]
    return func.__doc__
def file_code(function_index=1, function_name=None):
    """
    Return the full source text of the file containing the calling function.

    function_index of 2 will give the parent of the caller;
    function_name should not be used with function_index.

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the code from the target function's file
    """
    target = function_info(function_index + 1, function_name)
    with open(target['file'], 'r') as handle:
        return handle.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
                    extra_values=None):
    """
    Return a dictionary of the caller's local variables that are parameters
    to *function*, suitable for ``function(**relevant_kwargs(function))``.

    :param function: function to select parameters for
    :param exclude_keys: str, list, or callable of keys to skip;
        defaults to excluding 'self'
    :param exclude_values: obj, list, or callable of values to skip;
        defaults to excluding None
    :param extra_values: dict of other values to include with the locals
    :return: dict of local variables for the function
    """
    args = function_args(function)
    # BUG FIX: ``exclude_values`` was accepted but never forwarded, so a
    # caller-supplied filter was silently ignored.  Passing it through is
    # backward-compatible because its default matches function_kwargs'.
    locals_values = function_kwargs(function_index=2,
                                    exclude_keys=exclude_keys,
                                    exclude_values=exclude_values)
    if extra_values:
        locals_values.update(extra_values)
    return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
    """
    Return the local variable names of *function* (parameters first).

    :param function: function (or frame) to check arguments for
    :return: tuple of str variable names
    """
    try:
        return function.__code__.co_varnames
    except AttributeError:
        # BUG FIX: narrowed from a bare ``except:``.  Frame objects expose
        # their code object as ``f_code`` rather than ``__code__``.
        return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
                    exclude_keys='self', exclude_values=None):
    """
    Return a dict of the arguments passed into the function that calls this.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of the function name (should not
        be used with function_index)
    :param exclude_keys: str, list, or callable; if not a function it
        will be turned into one, defaults to excluding 'self'
    :param exclude_values: obj, list, or callable; if not a function it
        will be turned into one, defaults to excluding None
    :return: dict of arguments passed into the
        function making this call
    """
    # Normalize exclude_values into a predicate: a bare object becomes a
    # one-element list so the ``x in _exclude_values`` membership test
    # works either way.  (The ``and/or`` chain is a pre-ternary idiom.)
    if not hasattr(exclude_values, '__call__'):
        _exclude_values = isinstance(exclude_values, list) and\
            exclude_values or [exclude_values]
        exclude_values = lambda x: x in _exclude_values
    # Same normalization for exclude_keys.
    if not hasattr(exclude_keys, '__call__'):
        _exclude_keys = isinstance(exclude_keys, list) and\
            exclude_keys or [exclude_keys]
        exclude_keys = lambda x: x in _exclude_keys
    frm = func_frame(function_index + 1, function_name)
    # co_varnames lists parameters first, so the first co_argcount names
    # are exactly the target frame's declared parameters.
    args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
    ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
                not exclude_values(frm.f_locals[k])])
    return ret
def function_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for
(should not be used with function_index)
:return: str of the code from the target function
"""
frm = function_info(function_index + 1, function_name)
raise NotImplemented
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
"""
This will return a list of all function calls going back to the beginning
:return: list of str of function name
"""
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_linenumber(function_index=1, function_name=None, width=5):
"""
This will return the line number of the indicated function in the stack
:param width:
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return str of the current linenumber
"""
frm = func_frame(function_index + 1, function_name)
if width is None:
return frm._f_lineno
return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
ret = function_info(function_index=function_index + 1)
return ret['class_name'], ret['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
def trace_error(function_index=2):
"""
This will return the line number and line text of the last error
:param function_index: int to tell what frame to look from
:return: int, str of the line number and line text
"""
info = function_info(function_index)
traces = traceback.format_stack(limit=10)
for trace in traces:
file_, line_number, line_text = trace.split(',', 2)
if file_ == ' File "%s"' % info['file'] and\
line_number != 'line %s' % info['line_number']:
return line_number.split()[-1], line_text.strip()
return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
file_code
|
python
|
def file_code(function_index=1, function_name=None):
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
|
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L66-L78
|
[
"def function_info(function_index=1, function_name=None, line_number=None):\n \"\"\"\n This will return the class_name and function_name of the\n function traced back two functions.\n\n :param function_index: int of how many frames back the program \n should look (2 will give the parent of the caller)\n :param function_name: str of what function to look for (should \n not be used with function_index)\n :param line_number: int, some times the user may want to override \n this for testing purposes\n :return tuple: ('cls_name','func_name',line_number,globals())\n \"\"\"\n frm = func_frame(function_index + 1, function_name)\n\n file_ = os.path.abspath(frm.f_code.co_filename)\n class_name = frm.f_locals.get('self', None)\n if class_name is not None: # and not skip_class:\n class_name = str(type(class_name)).split('.',1)[-1].split(\"'\")[0]\n # try:\n # class_name = str(class_name).split(None, 1)[1]\n # class_name = class_name.split('.')[-1].replace(')', '')\n # except:\n # class_name = repr(class_name).split()[0].split('.')[-1]\n # if 'object at' in str(class_name):\n # class_name = str(class_name).split(' object at')[0].split('.')[-1]\n\n args, _, _, kwargs = inspect.getargvalues(frm)\n line_number = line_number or frm.f_lineno\n return {'class_name': class_name or '',\n 'function_name': frm.f_code.co_name,\n 'file': file_,\n 'path': os.path.split(file_)[0],\n 'basename': os.path.basename(file_).split('.')[0],\n 'line_number': line_number or frm.f_lineno,\n 'globals': frm.f_globals,\n 'locals': frm.f_locals,\n 'arguments': args,\n 'kwargs': kwargs,\n 'frame': frm}\n"
] |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_defaults(func):
"""
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
"""
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
def function_doc(function_index=1, function_name=None):
"""
This will return the doc of the calling function
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the doc from the target function
"""
frm = func_frame(function_index + 1, function_name)
try:
func = getattr(frm.f_locals['self'], frm.f_code.co_name)
except:
func = frm.f_globals[frm.f_code.co_name]
return func.__doc__
def function_path(func):
    """
    Return the path of the file defining *func*, with forward slashes.

    :param func: function whose source file path is wanted
    :return: str path to the file containing the function
    """
    # BUG FIX: the original ``if getattr(func, 'func_code', None)`` branch
    # returned exactly the same expression in both arms (a leftover from a
    # Python 2 ``func_code`` compatibility shim), so the check is dropped.
    return func.__code__.co_filename.replace('\\', '/')
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
"""
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding 'self'
:return: dict of arguments passed into the
function making this call
"""
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
def function_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for
(should not be used with function_index)
:return: str of the code from the target function
"""
frm = function_info(function_index + 1, function_name)
raise NotImplemented
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
"""
This will return a list of all function calls going back to the beginning
:return: list of str of function name
"""
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_linenumber(function_index=1, function_name=None, width=5):
"""
This will return the line number of the indicated function in the stack
:param width:
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return str of the current linenumber
"""
frm = func_frame(function_index + 1, function_name)
if width is None:
return frm._f_lineno
return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
ret = function_info(function_index=function_index + 1)
return ret['class_name'], ret['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
def trace_error(function_index=2):
"""
This will return the line number and line text of the last error
:param function_index: int to tell what frame to look from
:return: int, str of the line number and line text
"""
info = function_info(function_index)
traces = traceback.format_stack(limit=10)
for trace in traces:
file_, line_number, line_text = trace.split(',', 2)
if file_ == ' File "%s"' % info['file'] and\
line_number != 'line %s' % info['line_number']:
return line_number.split()[-1], line_text.strip()
return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
relevant_kwargs
|
python
|
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
|
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L81-L102
|
[
"def function_args(function):\n \"\"\"\n This will return a list of the non-keyword arguments\n :param function: function to check arguments for\n :return: list of arguments\n \"\"\"\n try:\n return function.__code__.co_varnames\n except:\n return function.f_code.co_varnames\n",
"def function_kwargs(function_index=1, function_name=None,\n exclude_keys='self', exclude_values=None):\n \"\"\"\n This will return a dict of the keyword \n arguments of the function that calls it\n :param function_index: int of how many frames back the program \n should look (2 will give the parent of the caller)\n :param function_name: str of the function name (should not \n be used with function_index)\n :param exclude_keys: str,list,func if not a function it \n will be turned into one, defaults to excluding None\n :param exclude_values: obj,list,func if not a function it will be \n turned into one, defaults to excluding 'self'\n :return: dict of arguments passed into the \n function making this call\n \"\"\"\n if not hasattr(exclude_values, '__call__'):\n _exclude_values = isinstance(exclude_values, list) and\\\n exclude_values or [exclude_values]\n exclude_values = lambda x: x in _exclude_values\n\n if not hasattr(exclude_keys, '__call__'):\n _exclude_keys = isinstance(exclude_keys, list) and\\\n exclude_keys or [exclude_keys]\n exclude_keys = lambda x: x in _exclude_keys\n\n frm = func_frame(function_index + 1, function_name)\n args = frm.f_code.co_varnames[:frm.f_code.co_argcount]\n ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\\\n not exclude_values(frm.f_locals[k])])\n return ret\n"
] |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_defaults(func):
"""
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
"""
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
def function_doc(function_index=1, function_name=None):
"""
This will return the doc of the calling function
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the doc from the target function
"""
frm = func_frame(function_index + 1, function_name)
try:
func = getattr(frm.f_locals['self'], frm.f_code.co_name)
except:
func = frm.f_globals[frm.f_code.co_name]
return func.__doc__
def function_path(func):
"""
This will return the path to the calling function
:param func:
:return:
"""
if getattr(func, 'func_code', None):
return func.__code__.co_filename.replace('\\', '/')
else:
return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
"""
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
"""
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding 'self'
:return: dict of arguments passed into the
function making this call
"""
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
def function_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for
(should not be used with function_index)
:return: str of the code from the target function
"""
frm = function_info(function_index + 1, function_name)
raise NotImplemented
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
"""
This will return a list of all function calls going back to the beginning
:return: list of str of function name
"""
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    This will return the line number of the indicated function in the stack

    :param width: int minimum width of the returned string, or None to
        return the raw int line number
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :return: str of the current line number (int when width is None)
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # Fixed: frame objects expose ``f_lineno``; the original accessed a
        # non-existent ``_f_lineno`` attribute and raised AttributeError.
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
ret = function_info(function_index=function_index + 1)
return ret['class_name'], ret['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
def trace_error(function_index=2):
"""
This will return the line number and line text of the last error
:param function_index: int to tell what frame to look from
:return: int, str of the line number and line text
"""
info = function_info(function_index)
traces = traceback.format_stack(limit=10)
for trace in traces:
file_, line_number, line_text = trace.split(',', 2)
if file_ == ' File "%s"' % info['file'] and\
line_number != 'line %s' % info['line_number']:
return line_number.split()[-1], line_text.strip()
return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_kwargs
|
python
|
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
|
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding 'self'
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding None
:return: dict of arguments passed into the
function making this call
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L117-L147
|
[
"def func_frame(function_index, function_name=None):\n \"\"\"\n This will return the class_name and function_name of the\n function traced back two functions.\n\n :param function_index: int of how many frames back the program \n should look (2 will give the parent of the caller)\n :param function_name: str of what function to look for (should \n not be used with function_index\n :return frame: this will return the frame of the calling function \"\"\"\n frm = inspect.currentframe()\n if function_name is not None:\n function_name = function_name.split('*')[0] # todo replace this\n # todo with regex\n for i in range(1000):\n if frm.f_code.co_name.startswith(function_name):\n break\n frm = frm.f_back\n else:\n for i in range(function_index):\n frm = frm.f_back\n try: # this is pycharm debugger inserting middleware\n if frm.f_code.co_name == 'run_code':\n frm = frm.f_back\n except:\n pass\n return frm\n"
] |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_defaults(func):
"""
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
"""
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
def function_doc(function_index=1, function_name=None):
    """
    This will return the doc of the calling function

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the doc from the target function
    """
    frm = func_frame(function_index + 1, function_name)
    try:
        # Bound-method lookup when the frame belongs to an instance method.
        func = getattr(frm.f_locals['self'], frm.f_code.co_name)
    except (KeyError, AttributeError):
        # Not a method: fall back to a module-level function of that name.
        # (Narrowed from a bare ``except`` so unrelated errors aren't hidden.)
        func = frm.f_globals[frm.f_code.co_name]
    return func.__doc__
def function_path(func):
    """
    This will return the path to the file that defines *func*.

    :param func: callable object to locate
    :return: str of the defining file's path, with forward slashes
    """
    # The original branched on the Python 2 ``func_code`` attribute but then
    # read ``__code__`` in both branches; a single return is equivalent.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
"""
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_code(function_index=1, function_name=None):
    """
    This will return the code of the calling function

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :return: str of the code from the target function
    """
    frm = function_info(function_index + 1, function_name)
    # Not implemented yet; raise the proper exception type.  The original
    # ``raise NotImplemented`` raised TypeError under Python 3 because
    # NotImplemented is a sentinel value, not an exception class.
    raise NotImplementedError
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
"""
This will return a list of all function calls going back to the beginning
:return: list of str of function name
"""
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    This will return the line number of the indicated function in the stack

    :param width: int minimum width of the returned string, or None to
        return the raw int line number
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :return: str of the current line number (int when width is None)
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # Fixed: frame objects expose ``f_lineno``; the original accessed a
        # non-existent ``_f_lineno`` attribute and raised AttributeError.
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
ret = function_info(function_index=function_index + 1)
return ret['class_name'], ret['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
def trace_error(function_index=2):
"""
This will return the line number and line text of the last error
:param function_index: int to tell what frame to look from
:return: int, str of the line number and line text
"""
info = function_info(function_index)
traces = traceback.format_stack(limit=10)
for trace in traces:
file_, line_number, line_text = trace.split(',', 2)
if file_ == ' File "%s"' % info['file'] and\
line_number != 'line %s' % info['line_number']:
return line_number.split()[-1], line_text.strip()
return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_info
|
python
|
def function_info(function_index=1, function_name=None, line_number=None):
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
|
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L163-L202
|
[
"def func_frame(function_index, function_name=None):\n \"\"\"\n This will return the class_name and function_name of the\n function traced back two functions.\n\n :param function_index: int of how many frames back the program \n should look (2 will give the parent of the caller)\n :param function_name: str of what function to look for (should \n not be used with function_index\n :return frame: this will return the frame of the calling function \"\"\"\n frm = inspect.currentframe()\n if function_name is not None:\n function_name = function_name.split('*')[0] # todo replace this\n # todo with regex\n for i in range(1000):\n if frm.f_code.co_name.startswith(function_name):\n break\n frm = frm.f_back\n else:\n for i in range(function_index):\n frm = frm.f_back\n try: # this is pycharm debugger inserting middleware\n if frm.f_code.co_name == 'run_code':\n frm = frm.f_back\n except:\n pass\n return frm\n"
] |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
    """
    Return the names of all parameters accepted by *func*.

    :param func: callable object to inspect
    :return: list of str of the parameter names, in declaration order
    """
    signature = getattr(inspect, 'signature', None)
    if signature is None:
        # Python 2 fallback: inspect.signature is unavailable there.
        return list(inspect.getargspec(func).args)
    return [name for name in signature(func).parameters]
def function_defaults(func):
"""
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
"""
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
def function_doc(function_index=1, function_name=None):
    """
    This will return the doc of the calling function

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the doc from the target function
    """
    frm = func_frame(function_index + 1, function_name)
    try:
        # Bound-method lookup when the frame belongs to an instance method.
        func = getattr(frm.f_locals['self'], frm.f_code.co_name)
    except (KeyError, AttributeError):
        # Not a method: fall back to a module-level function of that name.
        # (Narrowed from a bare ``except`` so unrelated errors aren't hidden.)
        func = frm.f_globals[frm.f_code.co_name]
    return func.__doc__
def function_path(func):
    """
    This will return the path to the file that defines *func*.

    :param func: callable object to locate
    :return: str of the defining file's path, with forward slashes
    """
    # The original branched on the Python 2 ``func_code`` attribute but then
    # read ``__code__`` in both branches; a single return is equivalent.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
"""
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
                    exclude_keys='self', exclude_values=None):
    """
    Return a dict of the named arguments of the function that calls it.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of the function name (should not
        be used with function_index)
    :param exclude_keys: str,list,func of argument names to skip;
        defaults to skipping 'self'
    :param exclude_values: obj,list,func of argument values to skip;
        defaults to skipping None
    :return: dict of arguments passed into the function making this call
    """
    def _as_predicate(spec):
        # Callables pass through; scalars/lists become membership tests.
        if hasattr(spec, '__call__'):
            return spec
        members = isinstance(spec, list) and spec or [spec]
        return lambda item: item in members
    exclude_values = _as_predicate(exclude_values)
    exclude_keys = _as_predicate(exclude_keys)
    frm = func_frame(function_index + 1, function_name)
    # co_varnames lists arguments first; co_argcount bounds that prefix.
    arg_names = frm.f_code.co_varnames[:frm.f_code.co_argcount]
    return dict((name, frm.f_locals[name]) for name in arg_names
                if not exclude_keys(name)
                and not exclude_values(frm.f_locals[name]))
def function_code(function_index=1, function_name=None):
    """
    This will return the code of the calling function

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :return: str of the code from the target function
    """
    frm = function_info(function_index + 1, function_name)
    # Not implemented yet; raise the proper exception type.  The original
    # ``raise NotImplemented`` raised TypeError under Python 3 because
    # NotImplemented is a sentinel value, not an exception class.
    raise NotImplementedError
def function_history():
    """
    Return the names of every function call on the current stack, starting
    with this function and walking back toward the program entry point.

    :return: list of str of function names
    """
    names = []
    frame = inspect.currentframe()
    for _ in range(100):  # hard cap so the walk always terminates
        try:
            # 'run_code' frames are middleware injected by the pycharm
            # debugger; they are not recorded (and they halt the walk).
            if frame.f_code.co_name != 'run_code':
                names.append(frame.f_code.co_name)
                frame = frame.f_back
        except Exception:
            # frame became None once the bottom of the stack was passed
            break
    return names
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    This will return the line number of the indicated function in the stack

    :param width: int minimum width of the returned string, or None to
        return the raw int line number
    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :return: str of the current line number (int when width is None)
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # Fixed: frame objects expose ``f_lineno``; the original accessed a
        # non-existent ``_f_lineno`` attribute and raised AttributeError.
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
    """Return (class_name, function_name) for the indicated stack frame."""
    info = function_info(function_index=function_index + 1)
    return info['class_name'], info['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
def trace_error(function_index=2):
    """
    This will return the line number and line text of the last error

    :param function_index: int to tell what frame to look from
    :return: (str, str) of the line number and line text, or (None, None)
        when no matching stack entry is found
    """
    info = function_info(function_index)
    traces = traceback.format_stack(limit=10)
    for trace in traces:
        # Each entry looks like:  File "path", line N, in name\n    source
        file_, line_number, line_text = trace.split(',', 2)
        # Match entries from this file, but skip the frame that called us.
        if file_ == ' File "%s"' % info['file'] and\
           line_number != 'line %s' % info['line_number']:
            return line_number.split()[-1], line_text.strip()
    return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_history
|
python
|
def function_history():
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
|
This will return a list of all function calls going back to the beginning
:return: list of str of function name
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L205-L220
| null |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_defaults(func):
    """
    Return the default values declared in *func*'s signature.

    :param func: callable object to inspect
    :return: list of the default parameter values, in declaration order
    """
    if getattr(inspect, 'signature', None) is None:
        # Python 2 fallback: getargspec's last element holds the defaults.
        return inspect.getargspec(func)[-1] or []
    empty = inspect.Parameter.empty
    return [param.default
            for param in inspect.signature(func).parameters.values()
            if param.default is not empty]
def function_doc(function_index=1, function_name=None):
    """
    This will return the doc of the calling function

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the doc from the target function
    """
    frm = func_frame(function_index + 1, function_name)
    try:
        # Bound-method lookup when the frame belongs to an instance method.
        func = getattr(frm.f_locals['self'], frm.f_code.co_name)
    except (KeyError, AttributeError):
        # Not a method: fall back to a module-level function of that name.
        # (Narrowed from a bare ``except`` so unrelated errors aren't hidden.)
        func = frm.f_globals[frm.f_code.co_name]
    return func.__doc__
def function_path(func):
    """
    This will return the path to the file that defines *func*.

    :param func: callable object to locate
    :return: str of the defining file's path, with forward slashes
    """
    # The original branched on the Python 2 ``func_code`` attribute but then
    # read ``__code__`` in both branches; a single return is equivalent.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
    """
    Return the full source text of the file containing the target function.

    function_index of 2 will give the parent of the caller;
    function_name should not be used with function_index.

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str of the source of the target function's file
    """
    target = function_info(function_index + 1, function_name)
    with open(target['file'], 'r') as source_file:
        return source_file.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
                    extra_values=None):
    """
    Return a dict of the caller's local variables that match *function*'s
    parameters, suitable for ``function(**relevant_kwargs(function))``.

    :param function: function to select parameters for
    :param exclude_keys: str,list,func of local names to drop;
        defaults to dropping 'self'
    :param exclude_values: currently unused by this wrapper (the filtering
        by value is handled inside function_kwargs' defaults)
    :param extra_values: dict of extra values merged over the locals
    :return: dict of local variables accepted by the function
    """
    accepted = function_args(function)
    candidates = function_kwargs(function_index=2, exclude_keys=exclude_keys)
    if extra_values:
        candidates.update(extra_values)
    return dict((name, value) for name, value in candidates.items()
                if name in accepted)
def function_args(function):
    """
    Return the local variable names (arguments first) of *function*.

    :param function: function object or frame-like object to inspect
    :return: tuple of variable names from the underlying code object
    """
    code = getattr(function, '__code__', None)
    if code is None:
        # Frame objects keep their code object under ``f_code``.
        code = function.f_code
    return code.co_varnames
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
"""
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding 'self'
:return: dict of arguments passed into the
function making this call
"""
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
def function_code(function_index=1, function_name=None):
    """
    This will return the code of the calling function

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :return: str of the code from the target function
    """
    frm = function_info(function_index + 1, function_name)
    # Not implemented yet; raise the proper exception type.  The original
    # ``raise NotImplemented`` raised TypeError under Python 3 because
    # NotImplemented is a sentinel value, not an exception class.
    raise NotImplementedError
def function_info(function_index=1, function_name=None, line_number=None):
    """
    Describe the frame ``function_index`` calls back in the stack.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :param line_number: int override of the reported line number, some
        times the user may want this for testing purposes
    :return: dict with keys 'class_name', 'function_name', 'file', 'path',
        'basename', 'line_number', 'globals', 'locals', 'arguments',
        'kwargs', 'frame'
    """
    frm = func_frame(function_index + 1, function_name)
    file_ = os.path.abspath(frm.f_code.co_filename)
    # A 'self' local means the target frame belongs to an instance method.
    class_name = frm.f_locals.get('self', None)
    if class_name is not None:
        # Reduce repr(type(self)) such as "<class 'mod.Cls'>" by dropping
        # the text up to the first '.' and everything after the quote.
        class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
    args, _, _, kwargs = inspect.getargvalues(frm)
    line_number = line_number or frm.f_lineno
    return {'class_name': class_name or '',
            'function_name': frm.f_code.co_name,
            'file': file_,
            'path': os.path.split(file_)[0],
            'basename': os.path.basename(file_).split('.')[0],
            'line_number': line_number or frm.f_lineno,
            'globals': frm.f_globals,
            'locals': frm.f_locals,
            'arguments': args,
            'kwargs': kwargs,
            'frame': frm}
def func_frame(function_index, function_name=None):
    """
    Return the stack frame ``function_index`` calls back from here, or the
    nearest enclosing frame whose function name starts with *function_name*.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index); any text after '*' is ignored
    :return frame: the frame of the calling function
    """
    frm = inspect.currentframe()
    if function_name is not None:
        function_name = function_name.split('*')[0]  # todo replace this
        # todo with regex
        for i in range(1000):  # bounded walk up the stack by name prefix
            if frm.f_code.co_name.startswith(function_name):
                break
            frm = frm.f_back
    else:
        for i in range(function_index):
            frm = frm.f_back
    try:  # 'run_code' is middleware injected by the pycharm debugger
        if frm.f_code.co_name == 'run_code':
            frm = frm.f_back
    except:
        pass
    return frm
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    Return the current line number of the indicated function in the stack.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :param width: int column width to left-justify the number to, or
        None to return the raw int
    :return: int line number when width is None, otherwise str padded
        with spaces to ``width``
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # BUG FIX: was ``frm._f_lineno`` which raises AttributeError;
        # the frame attribute is ``f_lineno``.
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
    """Return ``(class_name, function_name)`` for a frame up the stack.

    :param function_index: int of how many frames back to look
    :return: tuple of str -- the class name ('' for plain functions)
        and the function name
    """
    info = function_info(function_index=function_index + 1)
    return info['class_name'], info['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
    """Build an identifier like ``module__Class__func`` for a stack frame.

    :param function_index: int of how many frames back to look
    :param function_name: str name of the frame to find instead
    :param deliminator: str joined between the path segments
    :return: str of module name, optional class name, and function name
    """
    info = function_info(function_index=function_index + 1,
                         function_name=function_name)
    module = os.path.basename(info['file']).split('.')[0]
    segments = [module]
    if info['class_name']:
        segments.append(info['class_name'])
    segments.append(info['function_name'])
    return deliminator.join(segments)
def current_folder(function_index=1, function_name=None, deliminator='__'):
    """Return the directory (forward-slashed) of a caller's source file.

    :param function_index: int of how many frames back to look
    :param function_name: str name of the frame to find instead
    :param deliminator: unused; kept for interface compatibility
    :return: str directory containing the calling file
    """
    frame_info = function_info(function_index + 1, function_name)
    directory, _ = os.path.split(frame_info['file'])
    return directory.replace('\\', '/')
def trace_error(function_index=2):
    """
    Return the line number and line text of the last error.

    :param function_index: int to tell what frame to look from
    :return: (str, str) of the line number and line text, or
        (None, None) when no matching stack entry is found
    """
    info = function_info(function_index)
    # Each formatted stack entry looks like:
    #   '  File "<path>", line <n>, in <func>\n    <source line>\n'
    # so splitting on ',' twice yields (file part, line part, rest).
    traces = traceback.format_stack(limit=10)
    for trace in traces:
        file_, line_number, line_text = trace.split(',', 2)
        # Match entries from the caller's file, but skip the entry for
        # the very line that invoked us.
        if file_ == ' File "%s"' % info['file'] and\
                line_number != 'line %s' % info['line_number']:
            return line_number.split()[-1], line_text.strip()
    return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
func_frame
|
python
|
def func_frame(function_index, function_name=None):
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
|
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L223-L249
| null |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
    """
    Return the names of every parameter accepted by *func*.

    :param func: callable object
    :return: list of str of the arguments for the function
    """
    # Python 2 has no inspect.signature; fall back to getargspec there.
    if getattr(inspect, 'signature', None) is None:
        return list(inspect.getargspec(func).args)
    return list(inspect.signature(func).parameters)
def function_defaults(func):
    """
    Return the default values declared for *func*'s parameters.

    :param func: callable object
    :return: list of obj of default parameters
    """
    # Python 2 fallback: getargspec's last field is the defaults tuple.
    if getattr(inspect, 'signature', None) is None:
        return inspect.getargspec(func)[-1] or []
    defaults = []
    for param in inspect.signature(func).parameters.values():
        if param.default is not inspect.Parameter.empty:
            defaults.append(param.default)
    return defaults
def function_doc(function_index=1, function_name=None):
    """
    Return the docstring of the calling function.

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str docstring of the target function (None if undocumented)
    """
    frm = func_frame(function_index + 1, function_name)
    try:
        # Bound method: resolve it through the instance in the frame.
        func = getattr(frm.f_locals['self'], frm.f_code.co_name)
    except (KeyError, AttributeError):
        # Plain function: look it up in the frame's module globals.
        # (Bare ``except`` narrowed to the two errors the lookup can
        # actually raise, so unrelated bugs are no longer swallowed.)
        func = frm.f_globals[frm.f_code.co_name]
    return func.__doc__
def function_path(func):
    """
    Return the source-file path of *func* with forward slashes.

    :param func: function or method to locate
    :return: str path to the file that defines ``func``
    """
    # BUG FIX: the original branched on the Python 2 ``func_code``
    # attribute but both branches read ``__code__`` anyway; the dead
    # branch is removed -- on Python 3 the attribute is ``__code__``.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
    """
    Return the full source text of the file containing the calling function.

    :param function_index: int of how many frames back the program should look
    :param function_name: str of what function to look for
    :return: str contents of the caller's source file
    """
    frame_info = function_info(function_index + 1, function_name)
    source_file = frame_info['file']
    with open(source_file, 'r') as handle:
        return handle.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
                    extra_values=None):
    """
    Return a dictionary of the caller's local variables that are
    parameters of *function*.

    Example:
        function(**relevant_kwargs(function))

    :param function: function to select parameters for
    :param exclude_keys: str,list,func if not a function it will be
        converted into one; defaults to excluding 'self'
    :param exclude_values: obj,list,func if not a function it will be
        converted into one; defaults to excluding None values
    :param extra_values: dict of other values to include with local
    :return: dict of local variables for the function
    """
    args = function_args(function)
    # BUG FIX: exclude_values was accepted but never forwarded, so value
    # filtering silently did nothing; pass it through to function_kwargs.
    locals_values = function_kwargs(function_index=2,
                                    exclude_keys=exclude_keys,
                                    exclude_values=exclude_values)
    if extra_values:
        locals_values.update(extra_values)
    return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
    """
    Return the local-variable names of *function*'s code object
    (declared parameters first, then other locals).

    :param function: function (or frame) to check arguments for
    :return: tuple of str names from the code object
    """
    try:
        return function.__code__.co_varnames
    except AttributeError:
        # Frame objects carry their code object on ``f_code`` instead.
        # (Bare ``except`` narrowed to the only error the lookup raises.)
        return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
                    exclude_keys='self', exclude_values=None):
    """
    Return a dict of the arguments passed into the function that calls
    this helper.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of the function name (should not
        be used with function_index)
    :param exclude_keys: str,list,func if not a function it
        will be turned into one; defaults to excluding 'self'
    :param exclude_values: obj,list,func if not a function it will be
        turned into one; defaults to excluding None values
    :return: dict of arguments passed into the
        function making this call
    """
    # Normalise the exclude arguments into predicate functions: a bare
    # value or a list of values becomes a membership test.
    if not hasattr(exclude_values, '__call__'):
        _exclude_values = isinstance(exclude_values, list) and\
            exclude_values or [exclude_values]
        exclude_values = lambda x: x in _exclude_values
    if not hasattr(exclude_keys, '__call__'):
        _exclude_keys = isinstance(exclude_keys, list) and\
            exclude_keys or [exclude_keys]
        exclude_keys = lambda x: x in _exclude_keys
    frm = func_frame(function_index + 1, function_name)
    # The first co_argcount varnames are the declared parameters.
    args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
    ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
                not exclude_values(frm.f_locals[k])])
    return ret
def function_code(function_index=1, function_name=None):
    """
    Return the source code of the calling function (not yet implemented).

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :return: str of the code from the target function
    :raises NotImplementedError: always -- this is a stub
    """
    frm = function_info(function_index + 1, function_name)
    # BUG FIX: ``raise NotImplemented`` raises a TypeError on Python 3
    # because NotImplemented is not an exception class.
    raise NotImplementedError('function_code is not implemented yet')
def function_info(function_index=1, function_name=None, line_number=None):
    """
    Return a dict describing the frame ``function_index`` levels up the
    call stack (or the nearest frame whose name matches ``function_name``).

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :param line_number: int, some times the user may want to override
        this for testing purposes
    :return dict: keys are 'class_name', 'function_name', 'file', 'path',
        'basename', 'line_number', 'globals', 'locals', 'arguments',
        'kwargs', and 'frame'
    """
    # +1 so the lookup is relative to *our* caller, not this helper.
    frm = func_frame(function_index + 1, function_name)
    file_ = os.path.abspath(frm.f_code.co_filename)
    # A bound method exposes its instance as the local 'self'; recover a
    # short class name from its type.  Plain functions yield ''.
    class_name = frm.f_locals.get('self', None)
    if class_name is not None: # and not skip_class:
        # Trim the "<class '...'>" wrapper of str(type(x)) to a name.
        class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
    # try:
    # class_name = str(class_name).split(None, 1)[1]
    # class_name = class_name.split('.')[-1].replace(')', '')
    # except:
    # class_name = repr(class_name).split()[0].split('.')[-1]
    # if 'object at' in str(class_name):
    # class_name = str(class_name).split(' object at')[0].split('.')[-1]
    args, _, _, kwargs = inspect.getargvalues(frm)
    line_number = line_number or frm.f_lineno
    return {'class_name': class_name or '',
            'function_name': frm.f_code.co_name,
            'file': file_,
            'path': os.path.split(file_)[0],
            'basename': os.path.basename(file_).split('.')[0],
            'line_number': line_number or frm.f_lineno,
            'globals': frm.f_globals,
            'locals': frm.f_locals,
            'arguments': args,
            'kwargs': kwargs,
            'frame': frm}
def function_history():
    """
    Return the names of every function on the current call stack, from
    this call outward toward the program entry point.

    :return: list of str function names (PyCharm's injected 'run_code'
        frames are skipped)
    """
    names = []
    frame = inspect.currentframe()
    for _ in range(100):
        try:
            name = frame.f_code.co_name
            if name != 'run_code':
                names.append(name)
            frame = frame.f_back
        except Exception:
            # frame became None at the bottom of the stack.
            break
    return names
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    Return the current line number of the indicated function in the stack.

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should
        not be used with function_index)
    :param width: int column width to left-justify the number to, or
        None to return the raw int
    :return: int line number when width is None, otherwise str padded
        with spaces to ``width``
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # BUG FIX: was ``frm._f_lineno`` which raises AttributeError;
        # the frame attribute is ``f_lineno``.
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
    """Return ``(class_name, function_name)`` for a frame up the stack.

    :param function_index: int of how many frames back to look
    :return: tuple of str -- the class name ('' for plain functions)
        and the function name
    """
    info = function_info(function_index=function_index + 1)
    return info['class_name'], info['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
    """Build an identifier like ``module__Class__func`` for a stack frame.

    :param function_index: int of how many frames back to look
    :param function_name: str name of the frame to find instead
    :param deliminator: str joined between the path segments
    :return: str of module name, optional class name, and function name
    """
    info = function_info(function_index=function_index + 1,
                         function_name=function_name)
    module = os.path.basename(info['file']).split('.')[0]
    segments = [module]
    if info['class_name']:
        segments.append(info['class_name'])
    segments.append(info['function_name'])
    return deliminator.join(segments)
def current_folder(function_index=1, function_name=None, deliminator='__'):
    """Return the directory (forward-slashed) of a caller's source file.

    :param function_index: int of how many frames back to look
    :param function_name: str name of the frame to find instead
    :param deliminator: unused; kept for interface compatibility
    :return: str directory containing the calling file
    """
    frame_info = function_info(function_index + 1, function_name)
    directory, _ = os.path.split(frame_info['file'])
    return directory.replace('\\', '/')
def trace_error(function_index=2):
    """
    Return the line number and line text of the last error.

    :param function_index: int to tell what frame to look from
    :return: (str, str) of the line number and line text, or
        (None, None) when no matching stack entry is found
    """
    info = function_info(function_index)
    # Each formatted stack entry looks like:
    #   '  File "<path>", line <n>, in <func>\n    <source line>\n'
    # so splitting on ',' twice yields (file part, line part, rest).
    traces = traceback.format_stack(limit=10)
    for trace in traces:
        file_, line_number, line_text = trace.split(',', 2)
        # Match entries from the caller's file, but skip the entry for
        # the very line that invoked us.
        if file_ == ' File "%s"' % info['file'] and\
                line_number != 'line %s' % info['line_number']:
            return line_number.split()[-1], line_text.strip()
    return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
function_linenumber
|
python
|
def function_linenumber(function_index=1, function_name=None, width=5):
frm = func_frame(function_index + 1, function_name)
if width is None:
return frm._f_lineno
return str(frm.f_lineno).ljust(width)
|
This will return the line number of the indicated function in the stack
:param width:
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return str of the current linenumber
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L252-L265
|
[
"def func_frame(function_index, function_name=None):\n \"\"\"\n This will return the class_name and function_name of the\n function traced back two functions.\n\n :param function_index: int of how many frames back the program \n should look (2 will give the parent of the caller)\n :param function_name: str of what function to look for (should \n not be used with function_index\n :return frame: this will return the frame of the calling function \"\"\"\n frm = inspect.currentframe()\n if function_name is not None:\n function_name = function_name.split('*')[0] # todo replace this\n # todo with regex\n for i in range(1000):\n if frm.f_code.co_name.startswith(function_name):\n break\n frm = frm.f_back\n else:\n for i in range(function_index):\n frm = frm.f_back\n try: # this is pycharm debugger inserting middleware\n if frm.f_code.co_name == 'run_code':\n frm = frm.f_back\n except:\n pass\n return frm\n"
] |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_defaults(func):
"""
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
"""
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
def function_doc(function_index=1, function_name=None):
"""
This will return the doc of the calling function
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the doc from the target function
"""
frm = func_frame(function_index + 1, function_name)
try:
func = getattr(frm.f_locals['self'], frm.f_code.co_name)
except:
func = frm.f_globals[frm.f_code.co_name]
return func.__doc__
def function_path(func):
    """
    Return the source-file path of *func* with forward slashes.

    :param func: function or method to locate
    :return: str path to the file that defines ``func``
    """
    # BUG FIX: the original branched on the Python 2 ``func_code``
    # attribute but both branches read ``__code__`` anyway; the dead
    # branch is removed -- on Python 3 the attribute is ``__code__``.
    return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
"""
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
"""
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding 'self'
:return: dict of arguments passed into the
function making this call
"""
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
def function_code(function_index=1, function_name=None):
    """
    Return the source code of the calling function (not yet implemented).

    :param function_index: int of how many frames back the program
        should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for
        (should not be used with function_index)
    :return: str of the code from the target function
    :raises NotImplementedError: always -- this is a stub
    """
    frm = function_info(function_index + 1, function_name)
    # BUG FIX: ``raise NotImplemented`` raises a TypeError on Python 3
    # because NotImplemented is not an exception class.
    raise NotImplementedError('function_code is not implemented yet')
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
"""
This will return a list of all function calls going back to the beginning
:return: list of str of function name
"""
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_name(function_index=1):
ret = function_info(function_index=function_index + 1)
return ret['class_name'], ret['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
def trace_error(function_index=2):
"""
This will return the line number and line text of the last error
:param function_index: int to tell what frame to look from
:return: int, str of the line number and line text
"""
info = function_info(function_index)
traces = traceback.format_stack(limit=10)
for trace in traces:
file_, line_number, line_text = trace.split(',', 2)
if file_ == ' File "%s"' % info['file'] and\
line_number != 'line %s' % info['line_number']:
return line_number.split()[-1], line_text.strip()
return None, None
|
SeabornGames/Meta
|
seaborn_meta/calling_function.py
|
trace_error
|
python
|
def trace_error(function_index=2):
info = function_info(function_index)
traces = traceback.format_stack(limit=10)
for trace in traces:
file_, line_number, line_text = trace.split(',', 2)
if file_ == ' File "%s"' % info['file'] and\
line_number != 'line %s' % info['line_number']:
return line_number.split()[-1], line_text.strip()
return None, None
|
This will return the line number and line text of the last error
:param function_index: int to tell what frame to look from
:return: int, str of the line number and line text
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L290-L303
|
[
"def function_info(function_index=1, function_name=None, line_number=None):\n \"\"\"\n This will return the class_name and function_name of the\n function traced back two functions.\n\n :param function_index: int of how many frames back the program \n should look (2 will give the parent of the caller)\n :param function_name: str of what function to look for (should \n not be used with function_index)\n :param line_number: int, some times the user may want to override \n this for testing purposes\n :return tuple: ('cls_name','func_name',line_number,globals())\n \"\"\"\n frm = func_frame(function_index + 1, function_name)\n\n file_ = os.path.abspath(frm.f_code.co_filename)\n class_name = frm.f_locals.get('self', None)\n if class_name is not None: # and not skip_class:\n class_name = str(type(class_name)).split('.',1)[-1].split(\"'\")[0]\n # try:\n # class_name = str(class_name).split(None, 1)[1]\n # class_name = class_name.split('.')[-1].replace(')', '')\n # except:\n # class_name = repr(class_name).split()[0].split('.')[-1]\n # if 'object at' in str(class_name):\n # class_name = str(class_name).split(' object at')[0].split('.')[-1]\n\n args, _, _, kwargs = inspect.getargvalues(frm)\n line_number = line_number or frm.f_lineno\n return {'class_name': class_name or '',\n 'function_name': frm.f_code.co_name,\n 'file': file_,\n 'path': os.path.split(file_)[0],\n 'basename': os.path.basename(file_).split('.')[0],\n 'line_number': line_number or frm.f_lineno,\n 'globals': frm.f_globals,\n 'locals': frm.f_locals,\n 'arguments': args,\n 'kwargs': kwargs,\n 'frame': frm}\n"
] |
"""
This module is used to get the calling class, function, file, line_number, and locals
Issues:
builtin functions like eval, break this
pycharm will also break this, although it sometimes can recover
"""
import inspect
import os
import traceback
def function_arguments(func):
"""
This returns a list of all arguments
:param func: callable object
:return: list of str of the arguments for the function
"""
if getattr(inspect, 'signature', None) is None:
return list(inspect.getargspec(func).args)
else:
return list(inspect.signature(func).parameters.keys())
def function_defaults(func):
"""
This returns a list of the default arguments
:param func: callable object
:return: list of obj of default parameters
"""
if getattr(inspect, 'signature',None) is None:
return inspect.getargspec(func)[-1] or []
else:
return [v.default for k,v in list(
inspect.signature(func).parameters.items())
if v.default is not inspect._empty]
def function_doc(function_index=1, function_name=None):
"""
This will return the doc of the calling function
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the doc from the target function
"""
frm = func_frame(function_index + 1, function_name)
try:
func = getattr(frm.f_locals['self'], frm.f_code.co_name)
except:
func = frm.f_globals[frm.f_code.co_name]
return func.__doc__
def function_path(func):
"""
This will return the path to the calling function
:param func:
:return:
"""
if getattr(func, 'func_code', None):
return func.__code__.co_filename.replace('\\', '/')
else:
return func.__code__.co_filename.replace('\\', '/')
def file_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
function_index of 2 will give the parent of the caller
function_name should not be used with function_index
:param function_index: int of how many frames back the program should look
:param function_name: str of what function to look for
:return: str of the code from the target function
"""
info = function_info(function_index + 1, function_name)
with open(info['file'], 'r') as fn:
return fn.read()
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a funciton, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be convereted
into one, defaults to excluding 'self'
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args}
def function_args(function):
"""
This will return a list of the non-keyword arguments
:param function: function to check arguments for
:return: list of arguments
"""
try:
return function.__code__.co_varnames
except:
return function.f_code.co_varnames
def function_kwargs(function_index=1, function_name=None,
exclude_keys='self', exclude_values=None):
"""
This will return a dict of the keyword
arguments of the function that calls it
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of the function name (should not
be used with function_index)
:param exclude_keys: str,list,func if not a function it
will be turned into one, defaults to excluding None
:param exclude_values: obj,list,func if not a function it will be
turned into one, defaults to excluding 'self'
:return: dict of arguments passed into the
function making this call
"""
if not hasattr(exclude_values, '__call__'):
_exclude_values = isinstance(exclude_values, list) and\
exclude_values or [exclude_values]
exclude_values = lambda x: x in _exclude_values
if not hasattr(exclude_keys, '__call__'):
_exclude_keys = isinstance(exclude_keys, list) and\
exclude_keys or [exclude_keys]
exclude_keys = lambda x: x in _exclude_keys
frm = func_frame(function_index + 1, function_name)
args = frm.f_code.co_varnames[:frm.f_code.co_argcount]
ret = dict([(k, frm.f_locals[k]) for k in args if not exclude_keys(k) and\
not exclude_values(frm.f_locals[k])])
return ret
def function_code(function_index=1, function_name=None):
"""
This will return the code of the calling function
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for
(should not be used with function_index)
:return: str of the code from the target function
"""
frm = function_info(function_index + 1, function_name)
raise NotImplemented
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm}
def function_history():
"""
This will return a list of all function calls going back to the beginning
:return: list of str of function name
"""
ret = []
frm = inspect.currentframe()
for i in range(100):
try:
if frm.f_code.co_name != 'run_code': # this is pycharm debugger
# inserting middleware
ret.append(frm.f_code.co_name)
frm = frm.f_back
except Exception as e:
break
return ret
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm
def function_linenumber(function_index=1, function_name=None, width=5):
"""
This will return the line number of the indicated function in the stack
:param width:
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return str of the current linenumber
"""
frm = func_frame(function_index + 1, function_name)
if width is None:
return frm._f_lineno
return str(frm.f_lineno).ljust(width)
def function_name(function_index=1):
ret = function_info(function_index=function_index + 1)
return ret['class_name'], ret['function_name']
def path(function_index=1, function_name=None, deliminator='__'):
ret = function_info(function_index=function_index + 1,
function_name=function_name)
file_ = os.path.basename(ret['file']).split('.')[0]
if ret['class_name']:
return '%s%s%s%s%s' % (file_, deliminator, ret['class_name'],
deliminator, ret['function_name'])
else:
return '%s%s%s' % (file_, deliminator, ret['function_name'])
def current_folder(function_index=1, function_name=None, deliminator='__'):
info = function_info(function_index + 1, function_name)
return os.path.split(info['file'])[0].replace('\\','/')
|
SeabornGames/Meta
|
seaborn_meta/parse_doc.py
|
parse_doc_dict
|
python
|
def parse_doc_dict(text=None, split_character="::"):
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return t.split(':', 1)[0].strip(), t.split(':', 1)[1].strip()
return dict(clean(line) for line in text)
|
Returns a dictionary of the parsed doc for
example the following would return {'a':'A','b':'B'} ::
a:A
b:B
:param split_character:
:param text: str of the text to parse, by default uses calling function doc
:param split_character: str of the characters to split on in the doc string
:return: dict
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/parse_doc.py#L14-L32
|
[
"def function_doc(function_index=1, function_name=None):\n \"\"\"\n This will return the doc of the calling function\n :param function_index: int of how many frames back the program should look\n :param function_name: str of what function to look for\n :return: str of the doc from the target function\n \"\"\"\n frm = func_frame(function_index + 1, function_name)\n try:\n func = getattr(frm.f_locals['self'], frm.f_code.co_name)\n except:\n func = frm.f_globals[frm.f_code.co_name]\n return func.__doc__\n"
] |
""" This module is to provide support for auto parsing function doc.
It looks for the text in the doc string after :: but before :param
"""
from .calling_function import function_doc
from datetime import datetime
import sys
if sys.version_info[0] == 2:
STR = 'basestring'
else:
STR = 'str'
def parse_doc_list(text=None, is_stripped=True, split_character="::"):
"""
Returns a list of the parsed doc for
example the following would return ['a:A','b:'B] ::
a:A
b:B
:param text: str of the text to parse, by default uses calling function doc
:param is_stripped: bool if True each line will be stripped
:param split_character: str of the characters to split on in the doc string
:return: list
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return is_stripped and t.strip() or t
return [clean(line) for line in text]
def parse_doc_str(text=None, is_untabbed=True, is_stripped=True,
tab=None, split_character="::"):
"""
Returns a str of the parsed doc for example
the following would return 'a:A\nb:B' ::
a:A
b:B
:param text: str of the text to parse, by
default uses calling function doc
:param is_untabbed: bool if True will untab the text
:param is_stripped: bool if True will strip the text
:param tab: str of the tab to use when untabbing,
by default it will self determine tab size
:param split_character: str of the character to split the text on
:return: dict
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
tab = is_untabbed and \
(tab or text[:-1 * len(text.lstrip())].split('\n')[-1]) or ''
text = is_stripped and text.strip() or text
return text.replace('\n%s' % tab, '\n')
def parse_arg_types(text=None, is_return_included=False):
"""
:param text: str of the text to parse, by default
uses calling function doc
:param is_return_included: bool if True return will be return as well
:return: dict of args and variable types
"""
text = text or function_doc(2)
if is_return_included:
text = text.replace(':return:', ':param return:')
ret = {}
def evl(text_):
try:
return eval(text_)
except Exception as e:
return text_
if ':param' in text:
for param in text.split(':param ')[1:]:
name, desc = param.split(':', 1)
name = name.strip()
if desc.strip().startswith('list of '):
ret[name] = (list, evl(desc.split()[2].replace('str', STR)))
elif desc.strip().startswith('str timestamp'):
ret[name] = datetime
else:
ret[name] = evl(desc.split(None, 1)[0].replace('str', STR))
return ret
|
SeabornGames/Meta
|
seaborn_meta/parse_doc.py
|
parse_doc_list
|
python
|
def parse_doc_list(text=None, is_stripped=True, split_character="::"):
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return is_stripped and t.strip() or t
return [clean(line) for line in text]
|
Returns a list of the parsed doc for
example the following would return ['a:A','b:'B] ::
a:A
b:B
:param text: str of the text to parse, by default uses calling function doc
:param is_stripped: bool if True each line will be stripped
:param split_character: str of the characters to split on in the doc string
:return: list
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/parse_doc.py#L35-L53
|
[
"def function_doc(function_index=1, function_name=None):\n \"\"\"\n This will return the doc of the calling function\n :param function_index: int of how many frames back the program should look\n :param function_name: str of what function to look for\n :return: str of the doc from the target function\n \"\"\"\n frm = func_frame(function_index + 1, function_name)\n try:\n func = getattr(frm.f_locals['self'], frm.f_code.co_name)\n except:\n func = frm.f_globals[frm.f_code.co_name]\n return func.__doc__\n"
] |
""" This module is to provide support for auto parsing function doc.
It looks for the text in the doc string after :: but before :param
"""
from .calling_function import function_doc
from datetime import datetime
import sys
if sys.version_info[0] == 2:
STR = 'basestring'
else:
STR = 'str'
def parse_doc_dict(text=None, split_character="::"):
"""
Returns a dictionary of the parsed doc for
example the following would return {'a':'A','b':'B'} ::
a:A
b:B
:param split_character:
:param text: str of the text to parse, by default uses calling function doc
:param split_character: str of the characters to split on in the doc string
:return: dict
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return t.split(':', 1)[0].strip(), t.split(':', 1)[1].strip()
return dict(clean(line) for line in text)
def parse_doc_str(text=None, is_untabbed=True, is_stripped=True,
tab=None, split_character="::"):
"""
Returns a str of the parsed doc for example
the following would return 'a:A\nb:B' ::
a:A
b:B
:param text: str of the text to parse, by
default uses calling function doc
:param is_untabbed: bool if True will untab the text
:param is_stripped: bool if True will strip the text
:param tab: str of the tab to use when untabbing,
by default it will self determine tab size
:param split_character: str of the character to split the text on
:return: dict
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
tab = is_untabbed and \
(tab or text[:-1 * len(text.lstrip())].split('\n')[-1]) or ''
text = is_stripped and text.strip() or text
return text.replace('\n%s' % tab, '\n')
def parse_arg_types(text=None, is_return_included=False):
"""
:param text: str of the text to parse, by default
uses calling function doc
:param is_return_included: bool if True return will be return as well
:return: dict of args and variable types
"""
text = text or function_doc(2)
if is_return_included:
text = text.replace(':return:', ':param return:')
ret = {}
def evl(text_):
try:
return eval(text_)
except Exception as e:
return text_
if ':param' in text:
for param in text.split(':param ')[1:]:
name, desc = param.split(':', 1)
name = name.strip()
if desc.strip().startswith('list of '):
ret[name] = (list, evl(desc.split()[2].replace('str', STR)))
elif desc.strip().startswith('str timestamp'):
ret[name] = datetime
else:
ret[name] = evl(desc.split(None, 1)[0].replace('str', STR))
return ret
|
SeabornGames/Meta
|
seaborn_meta/parse_doc.py
|
parse_doc_str
|
python
|
def parse_doc_str(text=None, is_untabbed=True, is_stripped=True,
tab=None, split_character="::"):
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
tab = is_untabbed and \
(tab or text[:-1 * len(text.lstrip())].split('\n')[-1]) or ''
text = is_stripped and text.strip() or text
return text.replace('\n%s' % tab, '\n')
|
Returns a str of the parsed doc for example
the following would return 'a:A\nb:B' ::
a:A
b:B
:param text: str of the text to parse, by
default uses calling function doc
:param is_untabbed: bool if True will untab the text
:param is_stripped: bool if True will strip the text
:param tab: str of the tab to use when untabbing,
by default it will self determine tab size
:param split_character: str of the character to split the text on
:return: dict
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/parse_doc.py#L56-L78
|
[
"def function_doc(function_index=1, function_name=None):\n \"\"\"\n This will return the doc of the calling function\n :param function_index: int of how many frames back the program should look\n :param function_name: str of what function to look for\n :return: str of the doc from the target function\n \"\"\"\n frm = func_frame(function_index + 1, function_name)\n try:\n func = getattr(frm.f_locals['self'], frm.f_code.co_name)\n except:\n func = frm.f_globals[frm.f_code.co_name]\n return func.__doc__\n"
] |
""" This module is to provide support for auto parsing function doc.
It looks for the text in the doc string after :: but before :param
"""
from .calling_function import function_doc
from datetime import datetime
import sys
if sys.version_info[0] == 2:
STR = 'basestring'
else:
STR = 'str'
def parse_doc_dict(text=None, split_character="::"):
"""
Returns a dictionary of the parsed doc for
example the following would return {'a':'A','b':'B'} ::
a:A
b:B
:param split_character:
:param text: str of the text to parse, by default uses calling function doc
:param split_character: str of the characters to split on in the doc string
:return: dict
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return t.split(':', 1)[0].strip(), t.split(':', 1)[1].strip()
return dict(clean(line) for line in text)
def parse_doc_list(text=None, is_stripped=True, split_character="::"):
"""
Returns a list of the parsed doc for
example the following would return ['a:A','b:'B] ::
a:A
b:B
:param text: str of the text to parse, by default uses calling function doc
:param is_stripped: bool if True each line will be stripped
:param split_character: str of the characters to split on in the doc string
:return: list
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return is_stripped and t.strip() or t
return [clean(line) for line in text]
def parse_arg_types(text=None, is_return_included=False):
"""
:param text: str of the text to parse, by default
uses calling function doc
:param is_return_included: bool if True return will be return as well
:return: dict of args and variable types
"""
text = text or function_doc(2)
if is_return_included:
text = text.replace(':return:', ':param return:')
ret = {}
def evl(text_):
try:
return eval(text_)
except Exception as e:
return text_
if ':param' in text:
for param in text.split(':param ')[1:]:
name, desc = param.split(':', 1)
name = name.strip()
if desc.strip().startswith('list of '):
ret[name] = (list, evl(desc.split()[2].replace('str', STR)))
elif desc.strip().startswith('str timestamp'):
ret[name] = datetime
else:
ret[name] = evl(desc.split(None, 1)[0].replace('str', STR))
return ret
|
SeabornGames/Meta
|
seaborn_meta/parse_doc.py
|
parse_arg_types
|
python
|
def parse_arg_types(text=None, is_return_included=False):
text = text or function_doc(2)
if is_return_included:
text = text.replace(':return:', ':param return:')
ret = {}
def evl(text_):
try:
return eval(text_)
except Exception as e:
return text_
if ':param' in text:
for param in text.split(':param ')[1:]:
name, desc = param.split(':', 1)
name = name.strip()
if desc.strip().startswith('list of '):
ret[name] = (list, evl(desc.split()[2].replace('str', STR)))
elif desc.strip().startswith('str timestamp'):
ret[name] = datetime
else:
ret[name] = evl(desc.split(None, 1)[0].replace('str', STR))
return ret
|
:param text: str of the text to parse, by default
uses calling function doc
:param is_return_included: bool if True return will be return as well
:return: dict of args and variable types
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/parse_doc.py#L81-L109
|
[
"def function_doc(function_index=1, function_name=None):\n \"\"\"\n This will return the doc of the calling function\n :param function_index: int of how many frames back the program should look\n :param function_name: str of what function to look for\n :return: str of the doc from the target function\n \"\"\"\n frm = func_frame(function_index + 1, function_name)\n try:\n func = getattr(frm.f_locals['self'], frm.f_code.co_name)\n except:\n func = frm.f_globals[frm.f_code.co_name]\n return func.__doc__\n"
] |
""" This module is to provide support for auto parsing function doc.
It looks for the text in the doc string after :: but before :param
"""
from .calling_function import function_doc
from datetime import datetime
import sys
if sys.version_info[0] == 2:
STR = 'basestring'
else:
STR = 'str'
def parse_doc_dict(text=None, split_character="::"):
"""
Returns a dictionary of the parsed doc for
example the following would return {'a':'A','b':'B'} ::
a:A
b:B
:param split_character:
:param text: str of the text to parse, by default uses calling function doc
:param split_character: str of the characters to split on in the doc string
:return: dict
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return t.split(':', 1)[0].strip(), t.split(':', 1)[1].strip()
return dict(clean(line) for line in text)
def parse_doc_list(text=None, is_stripped=True, split_character="::"):
"""
Returns a list of the parsed doc for
example the following would return ['a:A','b:'B] ::
a:A
b:B
:param text: str of the text to parse, by default uses calling function doc
:param is_stripped: bool if True each line will be stripped
:param split_character: str of the characters to split on in the doc string
:return: list
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
text = text.strip().split('\n')
def clean(t): return is_stripped and t.strip() or t
return [clean(line) for line in text]
def parse_doc_str(text=None, is_untabbed=True, is_stripped=True,
tab=None, split_character="::"):
"""
Returns a str of the parsed doc for example
the following would return 'a:A\nb:B' ::
a:A
b:B
:param text: str of the text to parse, by
default uses calling function doc
:param is_untabbed: bool if True will untab the text
:param is_stripped: bool if True will strip the text
:param tab: str of the tab to use when untabbing,
by default it will self determine tab size
:param split_character: str of the character to split the text on
:return: dict
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
tab = is_untabbed and \
(tab or text[:-1 * len(text.lstrip())].split('\n')[-1]) or ''
text = is_stripped and text.strip() or text
return text.replace('\n%s' % tab, '\n')
|
SeabornGames/Meta
|
seaborn_meta/class_name.py
|
class_name_to_instant_name
|
python
|
def class_name_to_instant_name(name):
name = name.replace('/', '_')
ret = name[0].lower()
for i in range(1, len(name)):
if name[i] == '_':
ret += '.'
elif '9' < name[i] < 'a' and name[i - 1] != '_':
ret += '_' + name[i].lower()
else:
ret += name[i].lower()
return ret
|
This will convert from 'ParentName_ChildName' to
'parent_name.child_name'
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/class_name.py#L8-L20
| null |
""" This module contains some functionality that would be helpful for
meta programming
"""
import glob
import os
def instant_name_to_class_name(name):
"""
This will convert from 'parent_name.child_name' to
'ParentName_ChildName'
:param name: str of the name to convert
:return: str of the converted name
"""
name2 = ''.join([e.title() for e in name.split('_')])
return '_'.join([e[0].upper() + e[1:] for e in name2.split('.')])
def url_name_to_class_name(name):
"""
This will convert a class name to the url path name
:param name: str of the name to convert
:return: str of the converted name
"""
name = name.replace('/', '.')
return instant_name_to_class_name(name)
def create_init_files(path):
"""
This will create __init__.py for a module path and every subdirectory
:param path: str of the path to start adding __init__.py to
:return: None
"""
python_files = sorted([os.path.basename(file_)[:-3] for file_ in
glob.glob(os.path.join(path, '*.py'))
if not file_.endswith('__init__.py')])
folders = sorted([os.path.basename(folder) for folder in os.listdir(path)
if os.path.isdir(os.path.join(path, folder))])
with open(path + '/__init__.py', 'w') as fn:
if python_files:
[fn.write('from %s import *\n' % file_) for file_ in python_files]
[fn.write('import %s\n' % folder) for folder in folders]
for folder in folders:
create_init_files(os.path.join(path, folder))
|
SeabornGames/Meta
|
seaborn_meta/class_name.py
|
instant_name_to_class_name
|
python
|
def instant_name_to_class_name(name):
name2 = ''.join([e.title() for e in name.split('_')])
return '_'.join([e[0].upper() + e[1:] for e in name2.split('.')])
|
This will convert from 'parent_name.child_name' to
'ParentName_ChildName'
:param name: str of the name to convert
:return: str of the converted name
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/class_name.py#L23-L31
| null |
""" This module contains some functionality that would be helpful for
meta programming
"""
import glob
import os
def class_name_to_instant_name(name):
""" This will convert from 'ParentName_ChildName' to
'parent_name.child_name' """
name = name.replace('/', '_')
ret = name[0].lower()
for i in range(1, len(name)):
if name[i] == '_':
ret += '.'
elif '9' < name[i] < 'a' and name[i - 1] != '_':
ret += '_' + name[i].lower()
else:
ret += name[i].lower()
return ret
def url_name_to_class_name(name):
"""
This will convert a class name to the url path name
:param name: str of the name to convert
:return: str of the converted name
"""
name = name.replace('/', '.')
return instant_name_to_class_name(name)
def create_init_files(path):
"""
This will create __init__.py for a module path and every subdirectory
:param path: str of the path to start adding __init__.py to
:return: None
"""
python_files = sorted([os.path.basename(file_)[:-3] for file_ in
glob.glob(os.path.join(path, '*.py'))
if not file_.endswith('__init__.py')])
folders = sorted([os.path.basename(folder) for folder in os.listdir(path)
if os.path.isdir(os.path.join(path, folder))])
with open(path + '/__init__.py', 'w') as fn:
if python_files:
[fn.write('from %s import *\n' % file_) for file_ in python_files]
[fn.write('import %s\n' % folder) for folder in folders]
for folder in folders:
create_init_files(os.path.join(path, folder))
|
SeabornGames/Meta
|
seaborn_meta/class_name.py
|
create_init_files
|
python
|
def create_init_files(path):
python_files = sorted([os.path.basename(file_)[:-3] for file_ in
glob.glob(os.path.join(path, '*.py'))
if not file_.endswith('__init__.py')])
folders = sorted([os.path.basename(folder) for folder in os.listdir(path)
if os.path.isdir(os.path.join(path, folder))])
with open(path + '/__init__.py', 'w') as fn:
if python_files:
[fn.write('from %s import *\n' % file_) for file_ in python_files]
[fn.write('import %s\n' % folder) for folder in folders]
for folder in folders:
create_init_files(os.path.join(path, folder))
|
This will create __init__.py for a module path and every subdirectory
:param path: str of the path to start adding __init__.py to
:return: None
|
train
|
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/class_name.py#L44-L61
|
[
"def create_init_files(path):\n \"\"\"\n This will create __init__.py for a module path and every subdirectory\n :param path: str of the path to start adding __init__.py to\n :return: None\n \"\"\"\n python_files = sorted([os.path.basename(file_)[:-3] for file_ in\n glob.glob(os.path.join(path, '*.py'))\n if not file_.endswith('__init__.py')])\n\n folders = sorted([os.path.basename(folder) for folder in os.listdir(path)\n if os.path.isdir(os.path.join(path, folder))])\n with open(path + '/__init__.py', 'w') as fn:\n if python_files:\n [fn.write('from %s import *\\n' % file_) for file_ in python_files]\n [fn.write('import %s\\n' % folder) for folder in folders]\n for folder in folders:\n create_init_files(os.path.join(path, folder))\n"
] |
""" This module contains some functionality that would be helpful for
meta programming
"""
import glob
import os
def class_name_to_instant_name(name):
""" This will convert from 'ParentName_ChildName' to
'parent_name.child_name' """
name = name.replace('/', '_')
ret = name[0].lower()
for i in range(1, len(name)):
if name[i] == '_':
ret += '.'
elif '9' < name[i] < 'a' and name[i - 1] != '_':
ret += '_' + name[i].lower()
else:
ret += name[i].lower()
return ret
def instant_name_to_class_name(name):
"""
This will convert from 'parent_name.child_name' to
'ParentName_ChildName'
:param name: str of the name to convert
:return: str of the converted name
"""
name2 = ''.join([e.title() for e in name.split('_')])
return '_'.join([e[0].upper() + e[1:] for e in name2.split('.')])
def url_name_to_class_name(name):
"""
This will convert a class name to the url path name
:param name: str of the name to convert
:return: str of the converted name
"""
name = name.replace('/', '.')
return instant_name_to_class_name(name)
|
fakedrake/overlay_parse
|
overlay_parse/util.py
|
rx_int_extra
|
python
|
def rx_int_extra(rxmatch):
rxmatch = re.search("\d+", rxmatch.group(0))
return int(rxmatch.group(0))
|
We didn't just match an int but the int is what we need.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/util.py#L42-L48
| null |
import re
class Rng(object):
"""
Open-closed range of numbers that supports `in'
"""
def __init__(self, s, e, rng=(True, False)):
self.s = s
self.e = e
self.sclosed, self.eclosed = rng
def __contains__(self, num):
return ((not self.eclosed and num < self.e) or
(self.eclosed and num <= self.e)) and \
((self.sclosed and num >= self.s) or
(not self.sclosed and num < self.s))
def w(s):
"""
Most of the time we just want words.
"""
return r"\b%s\b" % s
def words(l):
return [w(i) for i in l]
def starts_with(txt, pre):
return txt[:len(pre)].lower() == pre.lower()
def rx_int(rxmatch):
return int(rxmatch.group(0))
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
mf
|
python
|
def mf(pred, props=None, value_fn=None, props_on_match=False, priority=None):
if isinstance(pred, BaseMatcher):
ret = pred if props_on_match else pred.props
if isinstance(pred, basestring) or \
type(pred).__name__ == 'SRE_Pattern':
ret = RegexMatcher(pred, props=props, value_fn=value_fn)
if isinstance(pred, set):
return OverlayMatcher(pred, props=props, value_fn=value_fn)
if isinstance(pred, list):
deps = [p for p in pred if isinstance(p, BaseMatcher)]
ret = ListMatcher([mf(p, props_on_match=True) for p in pred],
props=props, value_fn=value_fn,
dependencies=deps)
if priority is not None:
ret.priority = priority
return ret
|
Matcher factory.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L255-L279
| null |
import re
from .overlays import Overlay, OverlayedText
from functools import reduce
class BaseMatcher(object):
"""
An interface for Matcher objects.
"""
def offset_overlays(self, text, offset=0, **kw):
raise NotImplementedError("Class %s has not implemented \
offset_overlays" % type(self))
def fit_overlays(self, text, start=None, end=None, **kw):
raise NotImplementedError("Class %s has not implemented \
fit_overlays" % type(self))
def value(self, **kw):
if hasattr(self, 'value_fn') and self.value_fn:
return self.value_fn(**kw)
def __repr__(self):
return "<%s with props %s>" % (type(self).__name__, self.id)
class RegexMatcher(BaseMatcher):
"""
Regex matching for matching.
"""
def __init__(self, regex, props, value_fn=None):
"""
Provide the regex to be matched.
"""
if isinstance(regex, basestring):
self.regex = re.compile(regex, re.UNICODE)
else:
self.regex = regex
self.value_fn = value_fn
self.props = props
self.id = unicode(regex)
def offset_overlays(self, text, offset=0, **kw):
"""
Generate overlays after offset.
:param text: The text to be searched.
:param offset: Match starting that index. If none just search.
:returns: An overlay or None
"""
# This may be a bit slower but overlayedtext takes care of
# unicode issues.
if not isinstance(text, OverlayedText):
text = OverlayedText(text)
for m in self.regex.finditer(unicode(text)[offset:]):
yield Overlay(text, (offset + m.start(), offset + m.end()),
props=self.props,
value=self.value(rxmatch=m))
def fit_overlays(self, text, start=None, end=None, **kw):
"""
Get an overlay thet fits the range [start, end).
"""
_text = text[start or 0:]
if end:
_text = _text[:end]
m = self.regex.match(unicode(_text))
if m:
yield Overlay(text, (start + m.start(), start + m.end()),
props=self.props,
value=self.value(rxmatch=m))
class OverlayMatcher(BaseMatcher):
"""
Match a matcher. A matcher matches in 3 ways:
- Freely with an offset
- Fitting in a range.
The value_fn should accept the `overlay' keyword.
"""
def __init__(self, props_match, props=None, value_fn=None):
"""
:param props: Set of props to be matched.
"""
self.props_match = props_match
self.props = props or set()
self.id = unicode(self.props_match)
def offset_overlays(self, text, offset=0, **kw):
"""
Get overlays for the text.
:param text: The text to be searched. This is an overlay
object.
:param offset: Match starting that index. If none just search.
:returns: A generator for overlays.
"""
for ovl in text.overlays:
if ovl.match(offset=offset, props=self.props_match):
yield ovl.copy(props=self.props,
value=self.value(overlay=ovl))
def fit_overlays(self, text, start=None, end=None, **kw):
"""
Get an overlay thet fits the range [start, end).
"""
for ovl in text.overlays:
if ovl.match(props=self.props_match, rng=(start, end)):
yield ovl
class ListMatcher(BaseMatcher):
"""
Match as a concatenated series of other matchers. It is greedy in
the snse that it just matches everything.
value_fn should accept the `ovls' keyword which is a list of the
overlays that compose the result.
"""
def __init__(self, matchers, props=None, value_fn=None, dependencies=None):
self.matchers = matchers
self.props = props or set()
self.value_fn = value_fn
self.dependencies = dependencies or []
def _merge_ovls(self, ovls):
"""
Merge ovls and also setup the value and props.
"""
ret = reduce(lambda x, y: x.merge(y), ovls)
ret.value = self.value(ovls=ovls)
ret.set_props(self.props)
return ret
def _fit_overlay_lists(self, text, start, matchers, **kw):
"""
Return a list of overlays that start at start.
"""
if matchers:
for o in matchers[0].fit_overlays(text, start):
for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):
yield [o] + rest
else:
yield []
def offset_overlays(self, text, offset=0, run_deps=True, **kw):
"""
The heavy lifting is done by fit_overlays. Override just that for
alternatie implementation.
"""
if run_deps and self.dependencies:
text.overlay(self.dependencies)
for ovlf in self.matchers[0].offset_overlays(text,
goffset=offset,
**kw):
for ovll in self._fit_overlay_lists(text, ovlf.end,
self.matchers[1:]):
yield self._merge_ovls([ovlf] + ovll)
def fit_overlays(self, text, start=None, _matchers=None,
run_deps=True, ovls=None, **kw):
# Each matcher will create generate a series of overlays with
# it's fit overlay. Ignore end for now.
if run_deps and self.dependencies:
text.overlay(self.dependencies)
ovls = ovls or []
if _matchers is None:
_matchers = self.matchers
for ret in self._fit_overlay_lists(text, start=start,
matchers=_matchers, **kw):
yield self._merge_ovls(ret)
class MatcherMatcher(BaseMatcher):
"""
Match the matchers.
"""
def __init__(self, matchers, props=None, value_fn=None):
self.matchers = matchers
self.props = props
self.value_fn = value_fn
self._list_match = ListMatcher(
[OverlayMatcher(m.props) for m in matchers], props=self.props)
self._overlayed_already = []
def _maybe_run_matchers(self, text, run_matchers):
"""
OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that.
"""
if run_matchers is True or \
(run_matchers is not False and text not in self._overlayed_already):
text.overlay(self.matchers)
self._overlayed_already.append(text)
def fit_overlays(self, text, run_matchers=None, **kw):
"""
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
"""
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.fit_overlay(text, **kw):
yield i
def offset_overlays(self, text, run_matchers=None, **kw):
"""
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
"""
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.offset_overlays(text, **kw):
yield i
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
RegexMatcher.offset_overlays
|
python
|
def offset_overlays(self, text, offset=0, **kw):
# This may be a bit slower but overlayedtext takes care of
# unicode issues.
if not isinstance(text, OverlayedText):
text = OverlayedText(text)
for m in self.regex.finditer(unicode(text)[offset:]):
yield Overlay(text, (offset + m.start(), offset + m.end()),
props=self.props,
value=self.value(rxmatch=m))
|
Generate overlays after offset.
:param text: The text to be searched.
:param offset: Match starting that index. If none just search.
:returns: An overlay or None
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L50-L66
|
[
"def value(self, **kw):\n if hasattr(self, 'value_fn') and self.value_fn:\n return self.value_fn(**kw)\n"
] |
class RegexMatcher(BaseMatcher):
"""
Regex matching for matching.
"""
def __init__(self, regex, props, value_fn=None):
"""
Provide the regex to be matched.
"""
if isinstance(regex, basestring):
self.regex = re.compile(regex, re.UNICODE)
else:
self.regex = regex
self.value_fn = value_fn
self.props = props
self.id = unicode(regex)
def fit_overlays(self, text, start=None, end=None, **kw):
"""
Get an overlay thet fits the range [start, end).
"""
_text = text[start or 0:]
if end:
_text = _text[:end]
m = self.regex.match(unicode(_text))
if m:
yield Overlay(text, (start + m.start(), start + m.end()),
props=self.props,
value=self.value(rxmatch=m))
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
RegexMatcher.fit_overlays
|
python
|
def fit_overlays(self, text, start=None, end=None, **kw):
_text = text[start or 0:]
if end:
_text = _text[:end]
m = self.regex.match(unicode(_text))
if m:
yield Overlay(text, (start + m.start(), start + m.end()),
props=self.props,
value=self.value(rxmatch=m))
|
Get an overlay thet fits the range [start, end).
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L68-L82
|
[
"def value(self, **kw):\n if hasattr(self, 'value_fn') and self.value_fn:\n return self.value_fn(**kw)\n"
] |
class RegexMatcher(BaseMatcher):
"""
Regex matching for matching.
"""
def __init__(self, regex, props, value_fn=None):
"""
Provide the regex to be matched.
"""
if isinstance(regex, basestring):
self.regex = re.compile(regex, re.UNICODE)
else:
self.regex = regex
self.value_fn = value_fn
self.props = props
self.id = unicode(regex)
def offset_overlays(self, text, offset=0, **kw):
"""
Generate overlays after offset.
:param text: The text to be searched.
:param offset: Match starting that index. If none just search.
:returns: An overlay or None
"""
# This may be a bit slower but overlayedtext takes care of
# unicode issues.
if not isinstance(text, OverlayedText):
text = OverlayedText(text)
for m in self.regex.finditer(unicode(text)[offset:]):
yield Overlay(text, (offset + m.start(), offset + m.end()),
props=self.props,
value=self.value(rxmatch=m))
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
OverlayMatcher.offset_overlays
|
python
|
def offset_overlays(self, text, offset=0, **kw):
for ovl in text.overlays:
if ovl.match(offset=offset, props=self.props_match):
yield ovl.copy(props=self.props,
value=self.value(overlay=ovl))
|
Get overlays for the text.
:param text: The text to be searched. This is an overlay
object.
:param offset: Match starting that index. If none just search.
:returns: A generator for overlays.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L105-L117
|
[
"def value(self, **kw):\n if hasattr(self, 'value_fn') and self.value_fn:\n return self.value_fn(**kw)\n"
] |
class OverlayMatcher(BaseMatcher):
"""
Match a matcher. A matcher matches in 3 ways:
- Freely with an offset
- Fitting in a range.
The value_fn should accept the `overlay' keyword.
"""
def __init__(self, props_match, props=None, value_fn=None):
"""
:param props: Set of props to be matched.
"""
self.props_match = props_match
self.props = props or set()
self.id = unicode(self.props_match)
def fit_overlays(self, text, start=None, end=None, **kw):
"""
Get an overlay thet fits the range [start, end).
"""
for ovl in text.overlays:
if ovl.match(props=self.props_match, rng=(start, end)):
yield ovl
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
OverlayMatcher.fit_overlays
|
python
|
def fit_overlays(self, text, start=None, end=None, **kw):
for ovl in text.overlays:
if ovl.match(props=self.props_match, rng=(start, end)):
yield ovl
|
Get an overlay thet fits the range [start, end).
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L119-L126
| null |
class OverlayMatcher(BaseMatcher):
"""
Match a matcher. A matcher matches in 3 ways:
- Freely with an offset
- Fitting in a range.
The value_fn should accept the `overlay' keyword.
"""
def __init__(self, props_match, props=None, value_fn=None):
"""
:param props: Set of props to be matched.
"""
self.props_match = props_match
self.props = props or set()
self.id = unicode(self.props_match)
def offset_overlays(self, text, offset=0, **kw):
"""
Get overlays for the text.
:param text: The text to be searched. This is an overlay
object.
:param offset: Match starting that index. If none just search.
:returns: A generator for overlays.
"""
for ovl in text.overlays:
if ovl.match(offset=offset, props=self.props_match):
yield ovl.copy(props=self.props,
value=self.value(overlay=ovl))
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
ListMatcher._merge_ovls
|
python
|
def _merge_ovls(self, ovls):
ret = reduce(lambda x, y: x.merge(y), ovls)
ret.value = self.value(ovls=ovls)
ret.set_props(self.props)
return ret
|
Merge ovls and also setup the value and props.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L145-L153
|
[
"def value(self, **kw):\n if hasattr(self, 'value_fn') and self.value_fn:\n return self.value_fn(**kw)\n"
] |
class ListMatcher(BaseMatcher):
"""
Match as a concatenated series of other matchers. It is greedy in
the snse that it just matches everything.
value_fn should accept the `ovls' keyword which is a list of the
overlays that compose the result.
"""
def __init__(self, matchers, props=None, value_fn=None, dependencies=None):
self.matchers = matchers
self.props = props or set()
self.value_fn = value_fn
self.dependencies = dependencies or []
def _fit_overlay_lists(self, text, start, matchers, **kw):
"""
Return a list of overlays that start at start.
"""
if matchers:
for o in matchers[0].fit_overlays(text, start):
for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):
yield [o] + rest
else:
yield []
def offset_overlays(self, text, offset=0, run_deps=True, **kw):
"""
The heavy lifting is done by fit_overlays. Override just that for
alternatie implementation.
"""
if run_deps and self.dependencies:
text.overlay(self.dependencies)
for ovlf in self.matchers[0].offset_overlays(text,
goffset=offset,
**kw):
for ovll in self._fit_overlay_lists(text, ovlf.end,
self.matchers[1:]):
yield self._merge_ovls([ovlf] + ovll)
def fit_overlays(self, text, start=None, _matchers=None,
run_deps=True, ovls=None, **kw):
# Each matcher will create generate a series of overlays with
# it's fit overlay. Ignore end for now.
if run_deps and self.dependencies:
text.overlay(self.dependencies)
ovls = ovls or []
if _matchers is None:
_matchers = self.matchers
for ret in self._fit_overlay_lists(text, start=start,
matchers=_matchers, **kw):
yield self._merge_ovls(ret)
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
ListMatcher._fit_overlay_lists
|
python
|
def _fit_overlay_lists(self, text, start, matchers, **kw):
if matchers:
for o in matchers[0].fit_overlays(text, start):
for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):
yield [o] + rest
else:
yield []
|
Return a list of overlays that start at start.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L155-L166
|
[
"def _fit_overlay_lists(self, text, start, matchers, **kw):\n \"\"\"\n Return a list of overlays that start at start.\n \"\"\"\n\n if matchers:\n for o in matchers[0].fit_overlays(text, start):\n for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):\n yield [o] + rest\n\n else:\n yield []\n"
] |
class ListMatcher(BaseMatcher):
"""
Match as a concatenated series of other matchers. It is greedy in
the snse that it just matches everything.
value_fn should accept the `ovls' keyword which is a list of the
overlays that compose the result.
"""
def __init__(self, matchers, props=None, value_fn=None, dependencies=None):
self.matchers = matchers
self.props = props or set()
self.value_fn = value_fn
self.dependencies = dependencies or []
def _merge_ovls(self, ovls):
"""
Merge ovls and also setup the value and props.
"""
ret = reduce(lambda x, y: x.merge(y), ovls)
ret.value = self.value(ovls=ovls)
ret.set_props(self.props)
return ret
def offset_overlays(self, text, offset=0, run_deps=True, **kw):
"""
The heavy lifting is done by fit_overlays. Override just that for
alternatie implementation.
"""
if run_deps and self.dependencies:
text.overlay(self.dependencies)
for ovlf in self.matchers[0].offset_overlays(text,
goffset=offset,
**kw):
for ovll in self._fit_overlay_lists(text, ovlf.end,
self.matchers[1:]):
yield self._merge_ovls([ovlf] + ovll)
def fit_overlays(self, text, start=None, _matchers=None,
run_deps=True, ovls=None, **kw):
# Each matcher will create generate a series of overlays with
# it's fit overlay. Ignore end for now.
if run_deps and self.dependencies:
text.overlay(self.dependencies)
ovls = ovls or []
if _matchers is None:
_matchers = self.matchers
for ret in self._fit_overlay_lists(text, start=start,
matchers=_matchers, **kw):
yield self._merge_ovls(ret)
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
ListMatcher.offset_overlays
|
python
|
def offset_overlays(self, text, offset=0, run_deps=True, **kw):
if run_deps and self.dependencies:
text.overlay(self.dependencies)
for ovlf in self.matchers[0].offset_overlays(text,
goffset=offset,
**kw):
for ovll in self._fit_overlay_lists(text, ovlf.end,
self.matchers[1:]):
yield self._merge_ovls([ovlf] + ovll)
|
The heavy lifting is done by fit_overlays. Override just that for
alternatie implementation.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L168-L182
|
[
"def _merge_ovls(self, ovls):\n \"\"\"\n Merge ovls and also setup the value and props.\n \"\"\"\n\n ret = reduce(lambda x, y: x.merge(y), ovls)\n ret.value = self.value(ovls=ovls)\n ret.set_props(self.props)\n return ret\n",
"def _fit_overlay_lists(self, text, start, matchers, **kw):\n \"\"\"\n Return a list of overlays that start at start.\n \"\"\"\n\n if matchers:\n for o in matchers[0].fit_overlays(text, start):\n for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):\n yield [o] + rest\n\n else:\n yield []\n"
] |
class ListMatcher(BaseMatcher):
"""
Match as a concatenated series of other matchers. It is greedy in
the snse that it just matches everything.
value_fn should accept the `ovls' keyword which is a list of the
overlays that compose the result.
"""
def __init__(self, matchers, props=None, value_fn=None, dependencies=None):
self.matchers = matchers
self.props = props or set()
self.value_fn = value_fn
self.dependencies = dependencies or []
def _merge_ovls(self, ovls):
"""
Merge ovls and also setup the value and props.
"""
ret = reduce(lambda x, y: x.merge(y), ovls)
ret.value = self.value(ovls=ovls)
ret.set_props(self.props)
return ret
def _fit_overlay_lists(self, text, start, matchers, **kw):
"""
Return a list of overlays that start at start.
"""
if matchers:
for o in matchers[0].fit_overlays(text, start):
for rest in self._fit_overlay_lists(text, o.end, matchers[1:]):
yield [o] + rest
else:
yield []
def fit_overlays(self, text, start=None, _matchers=None,
run_deps=True, ovls=None, **kw):
# Each matcher will create generate a series of overlays with
# it's fit overlay. Ignore end for now.
if run_deps and self.dependencies:
text.overlay(self.dependencies)
ovls = ovls or []
if _matchers is None:
_matchers = self.matchers
for ret in self._fit_overlay_lists(text, start=start,
matchers=_matchers, **kw):
yield self._merge_ovls(ret)
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
MatcherMatcher._maybe_run_matchers
|
python
|
def _maybe_run_matchers(self, text, run_matchers):
if run_matchers is True or \
(run_matchers is not False and text not in self._overlayed_already):
text.overlay(self.matchers)
self._overlayed_already.append(text)
|
OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L218-L227
| null |
class MatcherMatcher(BaseMatcher):
"""
Match the matchers.
"""
def __init__(self, matchers, props=None, value_fn=None):
self.matchers = matchers
self.props = props
self.value_fn = value_fn
self._list_match = ListMatcher(
[OverlayMatcher(m.props) for m in matchers], props=self.props)
self._overlayed_already = []
def fit_overlays(self, text, run_matchers=None, **kw):
"""
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
"""
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.fit_overlay(text, **kw):
yield i
def offset_overlays(self, text, run_matchers=None, **kw):
"""
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
"""
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.offset_overlays(text, **kw):
yield i
|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
MatcherMatcher.fit_overlays
|
python
|
def fit_overlays(self, text, run_matchers=None, **kw):
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.fit_overlay(text, **kw):
yield i
|
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L229-L239
|
[
"def _maybe_run_matchers(self, text, run_matchers):\n \"\"\"\n OverlayedText should be smart enough to not run twice the same\n matchers but this is an extra handle of control over that.\n \"\"\"\n\n if run_matchers is True or \\\n (run_matchers is not False and text not in self._overlayed_already):\n text.overlay(self.matchers)\n self._overlayed_already.append(text)\n"
] |
class MatcherMatcher(BaseMatcher):
"""
Match the matchers.
"""
def __init__(self, matchers, props=None, value_fn=None):
self.matchers = matchers
self.props = props
self.value_fn = value_fn
self._list_match = ListMatcher(
[OverlayMatcher(m.props) for m in matchers], props=self.props)
self._overlayed_already = []
def _maybe_run_matchers(self, text, run_matchers):
"""
OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that.
"""
if run_matchers is True or \
(run_matchers is not False and text not in self._overlayed_already):
text.overlay(self.matchers)
self._overlayed_already.append(text)
def offset_overlays(self, text, run_matchers=None, **kw):
"""
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
"""
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.offset_overlays(text, **kw):
yield i
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.